| column | type | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |

Each record below lists these fields in order, separated by `|`, with the `content` field holding the raw source file.
6a9b08d396841136195b73cc21064a5ed3377c65
|
6eee8d0fba684826613f01fbc307219255fd6beb
|
/Policy_Gradient_Softmax/run_robotFetchReach.py
|
3b22395d804e7a86a77a3133d3173e9517ea6404
|
[] |
no_license
|
liheng12345/RL
|
847a6508d687d0f3b381485dc5a900698a84aeac
|
60fbcea8b60e47793d066880c3def13cce5ffdbb
|
refs/heads/main
| 2023-05-01T14:34:53.058256
| 2021-05-20T08:57:20
| 2021-05-20T08:57:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,428
|
py
|
import gym
from PGS_model import PolicyGradient
import matplotlib.pyplot as plt
DISPLAY_REWARD_THRESHOLD = 5000  # must stay defined; it is referenced below when an episode ends
RENDER = True
env = gym.make('CartPole-v0')
env.seed(1)
env = env.unwrapped
print(env.action_space)
print(env.observation_space)
print(env.observation_space.high)
print(env.observation_space.low)
RL = PolicyGradient(
n_actions = env.action_space.n,
n_features = env.observation_space.shape[0],
learning_rate = 0.02,
reward_decay= 0.99,
output_graph= True
)
for i_episode in range(3000):
observation = env.reset()
while True:
if RENDER : env.render()
action = RL.choose_action(observation)
observation_, reward, done, info = env.step(action)
RL.store_transition(observation, action, reward)
if done:
ep_rs_sum = sum(RL.ep_rs)
if 'running_reward' not in globals():
running_reward = ep_rs_sum
else:
running_reward = running_reward*0.99 + ep_rs_sum * 0.01
if running_reward > DISPLAY_REWARD_THRESHOLD: RENDER = True
print('episode:', i_episode, " reward:", int(running_reward))
vt = RL.learn()
if i_episode == 0:
plt.plot(vt)
plt.xlabel('episode steps')
plt.ylabel('normalized state-action value')
plt.show()
break
observation = observation_
|
[
"noreply@github.com"
] |
liheng12345.noreply@github.com
|
8c016d6e81d4f8aa2af12226ade161145e7b6f0c
|
a01d41c134bb92ed9179ff1ff5aa97a722973684
|
/AutoTestpass/projects/PrinterControl/po/pages/android/Page_MyFile.py
|
6497ee16870239d581f6983e127f4082baf4aeb8
|
[] |
no_license
|
Kate-zhongxin/AutoTestpass
|
65b152bb193d6a70b7ef44c1d61bd1a72880f469
|
d066f6b699c8c1cda49af6c2411cdca5609fef6e
|
refs/heads/master
| 2020-04-10T12:19:27.425239
| 2018-10-27T05:52:55
| 2018-10-27T05:52:55
| 161,018,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,177
|
py
|
# coding: utf-8
from projects.PrinterControl.po.models.android.Page_MyFile_model import Page_MyFile_model
'''MyFile page.'''
class Page_MyFile(Page_MyFile_model):
def __init__(self, UI):
self.UI = UI
        if 1 > 1:  # never true; the import below only exists to give IDEs a type hint for self.UI
            from fwk.object.AndroidFwk import AndroidFwk
            self.UI = AndroidFwk
Page_MyFile_model.__init__(self)
    # This is a function template showing how to write your business logic.
def example(self):
pass
# self.checkbox_accept().waitForShown().click()
# self.Pages.Page_endUserLicenseAgreement.text_always().clickIfPresent().wait(1)
# self.button_continue().click()
# self.button_search().waitForShown(60).click().wait(1)
# self.Edit_search().setValue("10.10.63.128")
# self.text_printerIp().relocateByText("10.10.63.128").click()
# self.image_appIcon().verifyIsShown()
        # Some elements' properties may change after a while; locate them again using clearForRefinding()
# self.image_appIcon().waitForShown().clearForRefinding().click()
# self.text_version().verifyEqual(self.text_version().getValue(), "4.3.19")
|
[
"474308199@qq.com"
] |
474308199@qq.com
|
481436780741170de647cc988d139a4b34d2979d
|
c0264f7a462f3fe4bba4ee110a51f49cfb018ab0
|
/homework-5/predict1.py
|
08ffe6ce1148eaee411275d57f789cbe3cddab77
|
[] |
no_license
|
m-wambua/homework-5
|
467cd2713be23be3577876e3dcd25dcf0bcfae34
|
45f8bb3d733a7e1b2e7821e385910fc28d840291
|
refs/heads/main
| 2023-08-05T23:15:32.693043
| 2021-10-11T13:56:53
| 2021-10-11T13:56:53
| 415,949,050
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 626
|
py
|
import pickle
from flask import Flask
from flask import request
from flask import jsonify
model_file= 'model_C=1.0.bin'
with open(model_file,'rb') as f_in:
dv,model=pickle.load(f_in)
app = Flask('churn')
@app.route('/predict1',methods = ['POST'])
def predict():
customer =request.get_json()
X=dv.transform([customer])
y_pred =model.predict_proba(X)[0,1]
churn = y_pred>=0.5
result ={
'churn_probability':float(y_pred),
'churn': bool(churn)
}
return jsonify(result)
if __name__ == "__main__":
    app.run(debug=True, host='0.0.0.0', port=5000)
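# Usage sketch (an addition, assuming the server above is running locally): the
# feature keys below are illustrative and must match whatever the DictVectorizer
# `dv` was fitted on.
# import requests
# customer = {'contract': 'two_year', 'tenure': 12, 'monthlycharges': 19.7}
# resp = requests.post('http://localhost:5000/predict1', json=customer)
# print(resp.json())  # e.g. {'churn': False, 'churn_probability': 0.05}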
|
[
"noreply@github.com"
] |
m-wambua.noreply@github.com
|
3e475b499dd37802b646062be8b0061132819906
|
b678b3a1f794f180f00b1414bfbd8f3442984443
|
/GanNetwork.py
|
ae89821d1323de6bee3bb202ea245999f714b0ed
|
[] |
no_license
|
nekomiao123/medical
|
fe2401c31bbe1a617c16e86786745518c6d39698
|
62054bc4d92909315bf7f4157eeb5b85ca845c29
|
refs/heads/master
| 2023-06-25T23:44:27.440281
| 2021-07-28T13:21:28
| 2021-07-28T13:21:28
| 375,958,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,058
|
py
|
import torch
import torch.nn as nn
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, down=True, use_act=True, **kwargs):
super().__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, padding_mode="reflect", **kwargs)
if down
else nn.ConvTranspose2d(in_channels, out_channels, **kwargs),
nn.InstanceNorm2d(out_channels),
nn.ReLU(inplace=True) if use_act else nn.Identity()
)
def forward(self, x):
return self.conv(x)
class ResidualBlock(nn.Module):
def __init__(self, channels):
super().__init__()
self.block = nn.Sequential(
ConvBlock(channels, channels, kernel_size=3, padding=1),
ConvBlock(channels, channels, use_act=False, kernel_size=3, padding=1),
)
def forward(self, x):
return x + self.block(x)
class Generator(nn.Module):
def __init__(self, img_channels, num_features = 64, num_residuals=9):
super().__init__()
self.initial = nn.Sequential(
nn.Conv2d(img_channels, num_features, kernel_size=7, stride=1, padding=3, padding_mode="reflect"),
nn.InstanceNorm2d(num_features),
nn.ReLU(inplace=True),
)
self.down_blocks = nn.ModuleList(
[
ConvBlock(num_features, num_features*2, kernel_size=3, stride=2, padding=1),
ConvBlock(num_features*2, num_features*4, kernel_size=3, stride=2, padding=1),
]
)
self.res_blocks = nn.Sequential(
*[ResidualBlock(num_features*4) for _ in range(num_residuals)]
)
self.up_blocks = nn.ModuleList(
[
ConvBlock(num_features*4, num_features*2, down=False, kernel_size=3, stride=2, padding=1, output_padding=1),
ConvBlock(num_features*2, num_features*1, down=False, kernel_size=3, stride=2, padding=1, output_padding=1),
]
)
self.last = nn.Conv2d(num_features*1, img_channels, kernel_size=7, stride=1, padding=3, padding_mode="reflect")
def forward(self, x):
x = self.initial(x)
for layer in self.down_blocks:
x = layer(x)
x = self.res_blocks(x)
for layer in self.up_blocks:
x = layer(x)
return torch.tanh(self.last(x))
class Block(nn.Module):
def __init__(self, in_channels, out_channels, stride):
super().__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 4, stride, 1, bias=True, padding_mode="reflect"),
nn.InstanceNorm2d(out_channels),
nn.LeakyReLU(0.2, inplace=True),
)
def forward(self, x):
return self.conv(x)
class Discriminator(nn.Module):
def __init__(self, in_channels=3, features=[64, 128, 256, 512]):
super().__init__()
self.initial = nn.Sequential(
nn.Conv2d(
in_channels,
features[0],
kernel_size=4,
stride=2,
padding=1,
padding_mode="reflect",
),
nn.LeakyReLU(0.2, inplace=True),
)
layers = []
in_channels = features[0]
for feature in features[1:]:
layers.append(Block(in_channels, feature, stride=1 if feature==features[-1] else 2))
in_channels = feature
layers.append(nn.Conv2d(in_channels, 1, kernel_size=4, stride=1, padding=1, padding_mode="reflect"))
self.model = nn.Sequential(*layers)
def forward(self, x):
x = self.initial(x)
return torch.sigmoid(self.model(x))
def test():
img_channels = 3
    img_height = 288
    img_width = 512
    x = torch.randn((1, img_channels, img_height, img_width))
gen = Generator(img_channels, 64)
print(gen(x).shape)
x = torch.randn((5, 3, 288, 512))
dis = Discriminator(in_channels=3)
preds = dis(x)
print(preds.shape)
if __name__ == "__main__":
test()
|
[
"447140443@qq.com"
] |
447140443@qq.com
|
c4acd6784d5e7292721ab27fa3e0b14a93af92ba
|
1aca3f447c14fe3e191ce8770b5532f3a5c57caf
|
/StaticCoders/asgi.py
|
9a7da1989fd4ad8fe7ae0313230975a048feb146
|
[] |
no_license
|
sanmiljadhav/Django-Blog
|
994b70118310591e33689912ed94797e4b096c87
|
a6d4e55b552549bc2c16944b817b6e99da8b2b07
|
refs/heads/master
| 2023-04-22T04:14:02.403740
| 2021-05-07T16:37:34
| 2021-05-07T16:37:34
| 365,290,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
ASGI config for StaticCoders project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'StaticCoders.settings')
application = get_asgi_application()
|
[
"sanmil.jadhav@gmail.com"
] |
sanmil.jadhav@gmail.com
|
de6d9bdba05bffa7668b1d69cfae8baa06748436
|
c88bf2bbb01b0c42c845fd2ed749e4dfab47b321
|
/Stack.py
|
baf5f108a7062da6563b9876099b8ed0fbde86ad
|
[] |
no_license
|
PhaniSantosh21/Sorting
|
8d8d436c24a632169b48d123460843978b0a760b
|
bfadc4c4f241bb1585d53c8cd6ba05dfd9abe18b
|
refs/heads/master
| 2020-03-06T14:56:31.046305
| 2018-07-28T19:22:02
| 2018-07-28T19:22:02
| 126,945,318
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 566
|
py
|
class stack:
    stackLen = 5
    def __init__(self):
        # keep the storage per-instance; a class-level list would be shared by all stacks
        self.arr = []
def push(self, a):
if(len(self.arr) == self.stackLen):
print("Stack is full")
else:
self.arr.append(a)
def pop(self):
if(self.arr == []):
print("Stack is empty")
else:
self.arr.pop()
def printStack(self):
print(self.arr)
a = stack()
a.push(1)
a.push(2)
a.push(3)
a.push(4)
a.push(5)
a.push(6)
a.printStack()
a.pop()
a.printStack()
a.pop()
a.pop()
a.pop()
a.pop()
a.pop()
|
[
"noreply@github.com"
] |
PhaniSantosh21.noreply@github.com
|
c6c0c0ec05add7dbe4ef9213cc81551cc0cbbafa
|
0a4f3e1a3a99eea168d4146f5eb3f02b88ea01b4
|
/source/conf.py
|
32280e4604ce3dda9fe8965fb987d8ab9b66bf0a
|
[
"CC-BY-4.0"
] |
permissive
|
m4sk1n-weblate/lubuntu-manual
|
4ace3ca9a0df1a27d55a474bc79439bdef8b5971
|
816877150d0971baf4ecc62986ad6ab61b5ac32e
|
refs/heads/master
| 2021-09-04T21:32:05.033331
| 2018-01-22T11:38:55
| 2018-01-22T11:38:55
| 118,484,253
| 1
| 0
| null | 2018-01-22T16:35:27
| 2018-01-22T16:35:27
| null |
UTF-8
|
Python
| false
| false
| 11,252
|
py
|
# -*- coding: utf-8 -*-
import sphinx_bootstrap_theme
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
html_logo = "manual64.png"
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Lubuntu Manual'
copyright = u'2016, Lubuntu Team'
author = u'Lubuntu Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'16.10'
# The full version, including alpha/beta/rc tags.
release = u'16.10'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Lubuntu Manual vbeta1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "manual.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'LubuntuManualdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'LubuntuManual.tex', u'Lubuntu Manual Documentation',
u'Lubuntu Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'lubuntumanual', u'Lubuntu Manual Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'LubuntuManual', u'Lubuntu Manual Documentation',
author, 'LubuntuManual', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#
epub_language = "en"
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# The depth of the table of contents in toc.ncx.
#
# epub_tocdepth = 3
# Allow duplicate toc entries.
#
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#
# epub_fix_images = False
# Scale large images.
#
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# epub_show_urls = 'inline'
# If false, no index is generated.
#
# epub_use_index = True
locale_dirs = ['po/']
gettext_compact = False
language = "en"
|
[
"tsimonq2@ubuntu.com"
] |
tsimonq2@ubuntu.com
|
c71de68d5d8e1ed94307b087f795dddfc08ddc00
|
7b8b03b7818a1fea58f174ff8c18b43578a6233f
|
/tests/core/test_models.py
|
b6c868d65b928963cc11299b613fc8c6b8eeec36
|
[] |
no_license
|
defance/coins_ph
|
400e4316a2d9a63752b21190ca7f1b0543b85343
|
2f0d3038f5dcca4c0f8711a1b095c6078799eb0b
|
refs/heads/master
| 2020-04-30T19:15:58.398453
| 2019-03-21T22:30:16
| 2019-03-21T22:30:16
| 177,033,466
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
from django.test import TestCase
from tests.factories import AccountFactory
class TestTransactionAccount(TestCase):
def test_update_balance(self):
account = AccountFactory(balance=10)
account.update_balance(42)
account.refresh_from_db()
        self.assertEqual(account.balance, 52)
|
[
"defance@gmail.com"
] |
defance@gmail.com
|
cc5c8ee8b919667a4e0c92b4fc9a5ca78bfa5604
|
44701119c8331bb8e9256f2afa730978ecbb93ae
|
/rassPaper.py
|
06d1a2ff4c418bec0df2b19ab66dccd445b9fdff
|
[] |
no_license
|
martydingo/rassPaper
|
d3e1e6df888a4b70f509fcf2683ce2e088066354
|
bd3368c277142e55ba372390e3a8a9cb1af6b25a
|
refs/heads/master
| 2023-06-15T01:53:31.314096
| 2021-07-09T17:38:18
| 2021-07-09T17:38:18
| 384,506,789
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 487
|
py
|
from waveshare_epd import epd2in13_V2
class rassPaper:
def __init__(self):
self.display = epd2in13_V2.EPD()
self.clearScreen()
def clearScreen(self):
self.display.init(self.display.FULL_UPDATE)
self.display.Clear(0xFF)
self.display.sleep()
if(__name__=='__main__'):
try:
        rass_paper = rassPaper()  # distinct name so the class is not shadowed by its instance
        rass_paper.clearScreen()
except KeyboardInterrupt:
epd2in13_V2.epdconfig.module_exit()
exit()
|
[
"marty@dingo.sh"
] |
marty@dingo.sh
|
c92f4be0b081b00cb58aecee72325a357a5d2afb
|
971d5e787113700c20ea3c87a5065ebf405d4820
|
/plot.py
|
ae83daa7ee6fc489de383657ddea64a37f0261f3
|
[] |
no_license
|
shashankn91/unsupervised-learning
|
2c1d5d76fb12868b1a90830fd6869960b393a7ab
|
6d37c328d01be66a66f5ab50baecf43fb30244e9
|
refs/heads/master
| 2020-04-04T17:03:14.815181
| 2018-11-04T17:01:58
| 2018-11-04T17:01:58
| 156,104,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import sys
out = './{}/'.format(sys.argv[1])
df = pd.read_csv(out + 'digits2D.csv')
Z = pd.DataFrame(df['target'])
reduced_data = df.drop(['target'], axis = 1)
h = .02
x_min, x_max = df['x'].min() - 1, df['x'].max() + 1
y_min, y_max = df['y'].min() - 1, df['y'].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
centers = df.groupby('target')[['x', 'y']].mean()
colors = ['b','g','r','c','m','y','k','w']
def func(x):
return colors[int(x%len(colors))]
pppp = df['target'].apply(func )
plt.scatter(df['x'], df['y'], c=pppp,s=50, cmap='viridis')
plt.scatter(centers['x'], centers['y'], c='black', s=200, alpha=0.5)
plt.savefig(out + 'digits.png')
# Madelon plot
plt.clf()  # clear the digits scatter so the two saved figures don't overlap
df = pd.read_csv(out + 'madelon2D.csv')
Z = pd.DataFrame(df['target'])
reduced_data = df.drop(['target'], axis = 1)
h = .02
x_min, x_max = df['x'].min() - 1, df['x'].max() + 1
y_min, y_max = df['y'].min() - 1, df['y'].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
centers = df.groupby('target')[['x', 'y']].mean()
pppp = df['target'].apply(func )
plt.scatter(df['x'], df['y'], c=pppp,s=50, cmap='viridis')
plt.scatter(centers['x'], centers['y'], c='black', s=200, alpha=0.5)
plt.savefig(out + 'madelon.png')
|
[
"shashank@fashtag.info"
] |
shashank@fashtag.info
|
bd11de2b5160f52fe4fc4a85912a099999ee72f1
|
33800418f82f5cd689e855d905a361285f9172d1
|
/mma/mmaapp/migrations/0002_auto_20151218_0534.py
|
6682ac47e86c4c6dd158d2a03684eb69edddadf7
|
[] |
no_license
|
FadiAlnabolsi/MMA_IR
|
94651735e43ef4ac042c2a47b4976bbaa34bfd28
|
89ac6423b35eaa2fc7549851723da39364e4f26d
|
refs/heads/master
| 2016-08-12T23:24:41.422356
| 2015-12-18T20:17:26
| 2015-12-18T20:17:26
| 48,214,756
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,306
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-18 05:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mmaapp', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='event',
name='fights',
),
migrations.RemoveField(
model_name='fights',
name='temp',
),
migrations.AddField(
model_name='fights',
name='Event',
field=models.OneToOneField(default=0, on_delete=django.db.models.deletion.CASCADE, to='mmaapp.Event'),
preserve_default=False,
),
migrations.AddField(
model_name='fights',
name='Fighter1',
field=models.OneToOneField(default=0, on_delete=django.db.models.deletion.CASCADE, to='mmaapp.Fighters'),
preserve_default=False,
),
migrations.AddField(
model_name='fights',
name='Fighter2',
field=models.OneToOneField(default=0, on_delete=django.db.models.deletion.CASCADE, related_name='opponent', to='mmaapp.Fighters'),
preserve_default=False,
),
]
|
[
"fadi@service.com"
] |
fadi@service.com
|
62e1d7b9b0866e34755d7d9147b6af4caa893cf8
|
a95aff2d2e0250704daac432e7dac2e6138733ae
|
/FacialRecognition/frec_env/bin/pip3
|
ba59b5f06d6242659902ee715bf87fece95963cb
|
[] |
no_license
|
josedev9/Additional-scripts
|
74b6b3bf9b5ba93f59b10fd6aacc938e2cfe0fca
|
aedefca7357225383c786b8917ab1119d6b436d2
|
refs/heads/main
| 2023-08-30T05:44:20.440601
| 2021-10-05T11:07:30
| 2021-10-05T11:07:30
| 388,692,450
| 0
| 1
| null | 2021-09-29T09:17:38
| 2021-07-23T05:55:11
|
Python
|
UTF-8
|
Python
| false
| false
| 270
|
#!/home/joser/Desktop/fac_rec/FacialRecognition/frec_env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[joseangelrodrrodr@gmail.com]"
] |
[joseangelrodrrodr@gmail.com]
|
|
0b72ded122399b000404bb6e0054f0f87a596751
|
2ffb1e1eae3bf5eb2409db9a037572254ed8d051
|
/spatial.py
|
defebbd8481ca1145619cbd415c2b6699f9ca47e
|
[] |
no_license
|
a2a-research/automatic-gcp-detector
|
3037065e203c0ccfef853ce5358aba56f35e49a3
|
8dfe10a6859ca174a2302eb38e5cca398fb4bf43
|
refs/heads/master
| 2022-05-12T00:17:08.670239
| 2018-07-22T07:18:51
| 2018-07-22T07:18:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,238
|
py
|
"""
Helper library containing functions that perform spatial geometric operations,
such as determining whether a point lies inside a polygon
.. versionadded:: 0.1
.. codeauthor:: Nekhelesh Ramananthan <krnekhelesh@skylarkdrones.com>
**External Dependencies**
.. hlist::
:columns: 4
- geopy
- numpy
- scipy
- shapely
"""
# Copyright (C) 2017-2018 Skylark Drones
import math
from collections import OrderedDict
from typing import List, Tuple
import geopy.point
import numpy as np
from geopy.distance import distance, VincentyDistance
from scipy.spatial.qhull import Delaunay
from shapely.geometry import (
MultiPoint,
MultiLineString,
shape,
LineString,
Point,
Polygon,
)
from shapely.ops import cascaded_union, polygonize
def is_point_inside_polygon(point, polygon):
"""
.. codeauthor:: Nekhelesh Ramananthan <krnekhelesh@gmail.com>
Determines if a point is *inside* a given polygon or not. A polygon is
defined as a list of (x, y) tuples.
:param tuple point: Coordinate of point (x, y)
:param list(tuple) polygon: Set of points that form a polygon
[(x1, y1), (x2, y2) ...]
:return: If point is inside or outside the polygon
:rtype: bool
"""
polygon = Polygon(polygon)
point = Point(point)
return point.within(polygon)
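# Illustrative check (an addition): the centre of the unit square lies inside it,
# so is_point_inside_polygon((0.5, 0.5), [(0, 0), (1, 0), (1, 1), (0, 1)]) is True.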
def _add_edge(edges, edge_points, coords, i, j):
"""
Add a line between the i-th and j-th points, if not in the list already
.. codeauthor:: Vaibhav Srinivasa <vaibhav@skylarkdrones.com>
"""
if (i, j) in edges or (j, i) in edges:
# already added
return
edges.add((i, j))
edge_points.append(coords[[i, j]])
def exterior_polygon_from_points(points, alpha=1000, buffer=0.0003):
"""
.. codeauthor:: Vaibhav Srinivasa <vaibhav@skylarkdrones.com>
Compute the exterior polygon (concave hull) of a set of points.
.. note::
The algorithm and workflow was derived from
http://blog.thehumangeo.com/2014/05/12/drawing-boundaries-in-python/
.. note::
alpha = 1 / average distance between points in Y direction
Example Input: ::
[
{'Lat': 16.1235371256305, 'Lng': 75.27075953678545},
{'Lat': 16.2334347125635, 'Lng': 75.22123563678545}
]
:param list[dict] points: List of coordinate points.
:param int alpha: alpha value to influence the gooeyness of the border.
Smaller numbers don't fall inward as much as larger numbers. Too large,
and you lose everything!
:param float buffer: Amount of buffer to add/remove from the generated
polygon. Defaults to 0.0003 (30 meters)
:return: outer boundary of the concave hull of a set of points
[(x1, y1), (x2, y2) ...] and area
:rtype: tuple(list, float)
"""
data_points = []
for index, point in enumerate(points):
data = {
'properties': OrderedDict([('id', None)]),
'id': str(index),
'geometry': {
'type': 'Point',
'coordinates': (point['Lng'], point['Lat']),
},
'type': 'Feature',
}
data_points.append(data)
shape_points = [shape(points['geometry']) for points in data_points]
if len(shape_points) < 4:
# When you have a triangle, there is no sense in computing an alpha
# shape.
return MultiPoint(list(shape_points)).convex_hull
coords = np.array([point.coords[0] for point in shape_points])
tri = Delaunay(coords)
edges = set()
edge_points = []
# loop over triangles:
# ia, ib, ic = indices of corner points of the triangle
for ia, ib, ic in tri.vertices:
pa = coords[ia]
pb = coords[ib]
pc = coords[ic]
# Lengths of sides of triangle
a = math.sqrt((pa[0] - pb[0]) ** 2 + (pa[1] - pb[1]) ** 2)
b = math.sqrt((pb[0] - pc[0]) ** 2 + (pb[1] - pc[1]) ** 2)
c = math.sqrt((pc[0] - pa[0]) ** 2 + (pc[1] - pa[1]) ** 2)
# Semiperimeter of triangle
s = (a + b + c) / 2.0
# Area of triangle by Heron's formula
area = math.sqrt(s * (s - a) * (s - b) * (s - c))
circum_r = a * b * c / (4.0 * area)
# Here's the radius filter.
if circum_r < 1.0 / alpha:
_add_edge(edges, edge_points, coords, ia, ib)
_add_edge(edges, edge_points, coords, ib, ic)
_add_edge(edges, edge_points, coords, ic, ia)
m = MultiLineString(edge_points)
triangles = list(polygonize(m))
return (
list(cascaded_union(triangles).buffer(buffer).exterior.coords),
cascaded_union(triangles).area,
)
def calculate_initial_compass_bearing(start_point, end_point):
"""
.. codeauthor:: Nihal Mohan <nihal@skylarkdrones.com>
Calculates the initial compass bearing between two points.
The formulae used is the following:
θ = atan2(sin(Δlong).cos(lat2), cos(lat1).sin(lat2)
− sin(lat1).cos(lat2).cos(Δlong))
:param tuple start_point: Latitude and longitude for the first point in
decimal degrees
:param tuple end_point: Latitude and longitude for the second point in
decimal degrees
:return: The bearing in degrees
:rtype: float
"""
if any(not isinstance(point, tuple) for point in [start_point, end_point]):
raise TypeError(
"start_point and end_point must be a tuple of latitude "
"and longitude"
)
start_lat, start_lng = start_point
end_lat, end_lng = end_point
start_lat = math.radians(start_lat)
end_lat = math.radians(end_lat)
diff_lng = math.radians(end_lng - start_lng)
x = math.sin(diff_lng) * math.cos(end_lat)
y = math.cos(start_lat) * math.sin(end_lat) - (
math.sin(start_lat) * math.cos(end_lat) * math.cos(diff_lng)
)
initial_bearing = math.atan2(x, y)
# Now we have the initial bearing but math.atan2 only returns values
# from -180° to + 180° which is not what we want for a compass bearing.
# The solution is to normalize the initial bearing using modulo
initial_bearing = math.degrees(initial_bearing)
compass_bearing = (initial_bearing + 360) % 360
return compass_bearing
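# Worked example (an addition): due east along the equator, from (0.0, 0.0) to
# (0.0, 1.0), x = sin(1 deg) > 0 and y = 0, so atan2(x, y) = 90 deg and the
# compass bearing returned is 90.0.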
def interpolate_gps_positions(start_point, end_point, interpolate_ratio):
"""
.. codeauthor:: Nihal Mohan <nihal@skylarkdrones.com>
Function to interpolate between two GPS Coordinates by a ratio in 2D
Example Input: ::
{'Lat': 16.1235371256305, 'Lng': 75.27075953678545, 'Alt': 875.142},
{'Lat': 16.2334347125635, 'Lng': 75.22123563678545, 'Alt': 893.146},
0.75
:param dict start_point: Start point coordinates in decimal degrees
:param dict end_point: End point coordinates in decimal degrees
:param float interpolate_ratio: Ratio at which the interpolation should
happen from the start
:return: Latitude and longitude of the interpolated GPS points
:rtype: Tuple(float)
"""
if any(not isinstance(point, dict) for point in [start_point, end_point]):
raise TypeError(
"start_point and end_point inputs should be dictionaries."
" Refer to documentation!"
)
try:
interpolate_ratio = float(interpolate_ratio)
except ValueError:
raise TypeError(
'Interpolate ratio is required to be a floating value.'
' Conversion to float failed!'
)
else:
if interpolate_ratio > 1:
raise ValueError(
'Interpolate ratio should be Less than 1. '
'This is Interpolation. Not Extrapolation!'
)
distance_travelled = distance(
(start_point['Lat'], start_point['Lng']),
(end_point['Lat'], end_point['Lng']),
).meters
dist_required = distance_travelled * interpolate_ratio
start = geopy.point.Point(start_point['Lat'], start_point['Lng'])
d = VincentyDistance(meters=dist_required)
bearing = calculate_initial_compass_bearing(
(start_point['Lat'], start_point['Lng']),
(end_point['Lat'], end_point['Lng']),
)
interpolated_point = d.destination(point=start, bearing=bearing)
# Altitude interpolation if the altitudes were present in the argument
start_alt = start_point.get('Alt', None)
end_alt = end_point.get('Alt', None)
if start_alt is not None and end_alt is not None:
interpolated_alt = start_alt + (end_alt - start_alt) * interpolate_ratio
return (
interpolated_point.latitude,
interpolated_point.longitude,
interpolated_alt,
)
else:
return interpolated_point.latitude, interpolated_point.longitude
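# Illustrative call (an addition): with interpolate_ratio=0.5 this returns roughly
# the geographic midpoint, and the altitude interpolates linearly to exactly 850.0:
# interpolate_gps_positions({'Lat': 16.0, 'Lng': 75.0, 'Alt': 800.0},
#                           {'Lat': 16.2, 'Lng': 75.2, 'Alt': 900.0}, 0.5)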
def split_line_by_length(line, max_length):
"""
.. codeauthor:: Nekhelesh Ramananthan <krnekhelesh@skylarkdrones.com>
.. versionadded:: Quark-0.2
Split a line into segments of lengths not more than maximum length
:param list(tuples) line: Line composed of points
:param float max_length: Maximum length of each segment
:return: Points that are separated by maximum length provided
:rtype: Shapely.MultiPoint
:raise ValueError: If total line length is less than the maximum length
of segment provided
"""
line_string = LineString(line)
total_line_length = 0
points = list(line_string.coords)
for index, point in enumerate(points):
if index < len(points) - 1:
x1, y1, z1 = point
x2, y2, z2 = points[index + 1]
total_line_length += distance((x1, y1), (x2, y2)).meters
if total_line_length < max_length:
raise ValueError(
'Total line length cannot be less than '
'the maximum length of segment provided'
)
splits = math.ceil(total_line_length / max_length)
return MultiPoint(
[
line_string.interpolate((i / splits), normalized=True)
for i in range(0, splits + 1)
]
)
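# Note (an addition): each point must be a 3-tuple because the length loop unpacks
# (x, y, z), e.g. split_line_by_length([(16.0, 75.0, 0.0), (16.1, 75.0, 0.0)], 500.0).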
def interpolate_coordinates_by_fixed_length(
coords: List[Tuple], interval: float
) -> List[Tuple]:
"""
.. codeauthor:: Shreehari Murali <hari@skylarkdrones.com>
.. versionadded:: Quark-0.2
Interpolates coordinates at fixed interval
:param list(tuples) coords: Line composed of Coordinates
:param float interval: The interval at which the linestring coordinates
have to be present
:return: Coordinates present at the input interval
:rtype: list(tuples)
"""
interpolated_list_of_coords = []
total_length = 0
if len(coords) == 0:
raise ValueError('Coordinates List is Empty!')
for index, coord in enumerate(coords):
if len(coord) == 3: # (lat, long, alt)
if index < len(coords) - 1:
lat1, long1, alt1 = coord
lat2, long2, alt2 = coords[index + 1]
dist = distance((lat1, long1), (lat2, long2)).meters
else: # (lat, long)
if index < len(coords) - 1:
lat1, long1 = coord
lat2, long2 = coords[index + 1]
dist = distance((lat1, long1), (lat2, long2)).meters
total_length += dist
if total_length > interval:
if len(coord) == 3:
line_string = split_line_by_length(coords, interval)
else:
new_list = []
for coord in coords:
lat, long = coord
new_list.append((lat, long, 0))
line_string = split_line_by_length(new_list, interval)
for coord in line_string:
interpolated_list_of_coords.append((coord.x, coord.y))
return interpolated_list_of_coords
else:
return coords
def buffer_line(
coords: List[Tuple], buffer_distance: float, interval: float, side: str
) -> List[Tuple]:
"""
.. codeauthor:: Shreehari Murali <hari@skylarkdrones.com>
.. versionadded:: Quark-0.2
Creates a linestring parallel to the given linestring at the given offset.
:param list(tuples) coords: Line composed of Coordinates in (lat, long) format
:param float buffer_distance: Parallel offset distance from the input linestring
:param float interval: The interval at which the linestring coordinates
have to be present
:param string side: 'right' or 'left' to the given linestring
:return: Coordinates buffered at the input interval distance
:rtype: list(tuples)
"""
buffered_linestring = (
LineString(coords)
.parallel_offset(buffer_distance * 0.00001, side)
.coords[:]
)
return interpolate_coordinates_by_fixed_length(
buffered_linestring, interval
)
def project_coord_on_line(point_coord: tuple, coords: List[Tuple]) -> Tuple:
"""
.. codeauthor:: Shreehari Murali <hari@skylarkdrones.com>
.. versionadded:: Quark-0.2
Takes in a coordinate and a linestring and gets the nearest coordinates
present on the linestring from the input coordinate
:param tuple point_coord: Coordinate to be projected in
(lat, long, alt) format
:param list(tuples) coords: Line containing 3d coordinates
:return: Coordinate in (lat,long,alt) format
:rtype: tuple
"""
projected_coord = 0
for index, coord in enumerate(coords):
if index == 0:
projected_coord = coord
else:
least_dist_from_point = distance(
(projected_coord[0], projected_coord[1]),
(point_coord[0], point_coord[1]),
).meters
dist_from_point = distance(
(coord[0], coord[1]), (point_coord[0], point_coord[1])
).meters
if dist_from_point < least_dist_from_point:
projected_coord = coord
return projected_coord
def get_point_at_given_length(length: float, coords: List[Tuple]) -> Tuple:
"""
.. codeauthor:: Shreehari Murali <hari@skylarkdrones.com>
.. versionadded:: Quark-0.2
Takes in the linestring containing coordinates and gets the coordinate
present at the input length
:param float length: The length of the line at which the coordinate has to
be obtained.
:param list(tuples) coords: Line containing coordinates in (lat, long, alt)
format.
:return: Coordinate present at the given length in (lat, long, alt) format.
:rtype: tuple
"""
current_length = 0
total_length_of_line = 0
if length < 0:
raise ValueError("Invalid Length input!")
for index, coord in enumerate(coords):
if index >= len(coords) - 1:
break
lat1, long1, alt1 = coord
lat2, long2, alt2 = coords[index + 1]
total_length_of_line += distance((lat1, long1), (lat2, long2)).meters
if length > total_length_of_line:
raise ValueError(
"Input Length is larger than " "the length of the line!"
)
else:
for index, coord in enumerate(coords):
if current_length > length:
break
lat1, long1, alt1 = coord
lat2, long2, alt2 = coords[index + 1]
current_length += distance((lat1, long1), (lat2, long2)).meters
return lat1, long1, alt1
def get_points_inside_polygon(
polygon_coords: List[Tuple], interval: float
) -> List[Tuple]:
"""
.. codeauthor:: Shreehari Murali <hari@skylarkdrones.com>
.. versionadded:: Quark-0.2
Takes in polygon composed of it's exterior coordinates and gets all the
points inside the polygon at the input interval
:param list(tuples) polygon_coords: polygon coords present in (lat, long)
format.
:param float interval: The interval at which the points have to be present
:return: Coordinates in (lat, long) format inside the given polygon.
:rtype: List(tuples)
"""
# bounding box coordinates
east = max(polygon_coords, key=lambda coords: coords[1])
west = min(polygon_coords, key=lambda coords: coords[1])
north = max(polygon_coords, key=lambda coords: coords[0])
south = min(polygon_coords, key=lambda coords: coords[0])
east_linestring = [(south[0], west[1], 0), (north[0], west[1], 0)]
interpol_east_linestring = interpolate_coordinates_by_fixed_length(
east_linestring, interval
)
dist = 0
lines = []
bounding_box_width = distance((east[1], 0), (west[1], 0)).meters
while dist <= bounding_box_width:
lines.append(
buffer_line(interpol_east_linestring, dist, interval, 'left')
)
dist += interval
if dist > bounding_box_width:
lines.append(
buffer_line(
interpol_east_linestring,
bounding_box_width,
interval,
'left',
)
)
points_inside_bounding_box = []
for line in lines:
for coord in line:
points_inside_bounding_box.append(coord)
points_inside_polygon = []
for point in points_inside_bounding_box:
if is_point_inside_polygon(point, polygon_coords):
points_inside_polygon.append(point)
return points_inside_polygon
|
[
"noreply@github.com"
] |
a2a-research.noreply@github.com
|
86e1b97b98349eb9e9cff28953b46b6184c6b6b3
|
3cffbd776e9d21d10759788aa3ee05be7da3dc84
|
/web/docker_django/urls.py
|
8c3a5330d949e1fd071bee41edbb3a16c7a00667
|
[] |
no_license
|
roy-rc/b2b
|
a482347d6848754c516e6ae06fac4ba1b340f3cb
|
65bc7d99132702a9170180602d43e66bbdce1587
|
refs/heads/master
| 2022-05-05T05:41:01.136381
| 2020-02-12T18:54:51
| 2020-02-12T18:54:51
| 191,446,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
from django.urls import include, re_path
from django.contrib import admin
urlpatterns = [
re_path(r'^admin/', admin.site.urls),
re_path(r'^', include('docker_django.products.urls')),
]
|
[
"roiman@mitocondria.cl"
] |
roiman@mitocondria.cl
|
3188d90a661c2d9b856f8af75351df705de6d1bf
|
26762585d08aa774af9f104472c97a8c7a9df181
|
/generators/v4d_super_station_2.py
|
1ff715d9c377df692ba6d0c4d8295bd62762d559
|
[] |
no_license
|
OxfordSKA/SKA1-low-layouts
|
379fbe5c056dc73706b1073f09e485880ecfa180
|
49e3ba2af4a447be38af03dde1d11898e3f8300b
|
refs/heads/master
| 2021-01-17T17:10:41.469929
| 2016-08-12T10:48:24
| 2016-08-12T10:48:24
| 47,823,977
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,178
|
py
|
"""Module to generate super-stations for trail v4d spec. layouts"""
# -*- coding: utf-8 -*-
from __future__ import print_function
import matplotlib.pyplot as pyplot
import numpy
from numpy.random import rand
import shutil
import os
from os.path import join
from math import radians
def rotate_coords(x, y, angle):
"""Rotate array of x, y coordinates counter clockwise by angle, in deg."""
xr = x * numpy.cos(radians(angle)) - y * numpy.sin(radians(angle))
yr = x * numpy.sin(radians(angle)) + y * numpy.cos(radians(angle))
return xr, yr
def gridgen(num_points, diameter, min_dist, max_trials=1000):
def grid_position(x, y, scale, grid_size):
        jx = int(round(x * scale)) + grid_size // 2  # floor division: these are array indices
        jy = int(round(y * scale)) + grid_size // 2
return jx, jy
    def get_trial_position(r):
x = -r + 2.0 * r * rand()
y = -r + 2.0 * r * rand()
return x, y
# Grid size and scaling onto the grid
grid_size = min(100, int(round(float(diameter) / min_dist)))
grid_cell = float(diameter) / grid_size # Grid sector cell size
scale = 1.0 / grid_cell # Scaling onto the sector grid.
check_width = 1
r = diameter / 2.0 # Radius
r_sq = r**2 # Radius, squared
min_dist_sq = min_dist**2 # minimum distance, squared
r_ant = min_dist / 2.0
# Pre-allocate coordinate arrays
x = numpy.zeros(num_points)
y = numpy.zeros(num_points)
# Grid meta-data
grid_i_start = numpy.zeros((grid_size, grid_size), dtype='i8')
grid_i_end = numpy.zeros((grid_size, grid_size), dtype='i8')
grid_count = numpy.zeros((grid_size, grid_size), dtype='i8')
grid_i_next = numpy.zeros(num_points, dtype='i8')
n = num_points
n_req = num_points
num_tries = 0
try_count = list()
for j in range(n_req):
done = False
while not done:
            # Generate a trial position
            xt, yt = get_trial_position(r)
rt = (xt**2 + yt**2)**0.5
# Check if the point is inside the diameter.
if rt + r_ant > r:
num_tries += 1
# Check if min distance is met.
else:
jx, jy = grid_position(xt, yt, scale, grid_size)
y0 = max(0, jy - check_width)
y1 = min(grid_size, jy + check_width + 1)
x0 = max(0, jx - check_width)
x1 = min(grid_size, jx + check_width + 1)
d_min = diameter # Set initial min to diameter.
for ky in range(y0, y1):
for kx in range(x0, x1):
if grid_count[kx, ky] > 0:
kh1 = grid_i_start[kx, ky]
for kh in range(grid_count[kx, ky]):
dx = xt - x[kh1]
dy = yt - y[kh1]
d_min = min((dx**2 + dy**2)**0.5, d_min)
kh1 = grid_i_next[kh1]
if d_min >= min_dist:
x[j] = xt
y[j] = yt
if grid_count[jx, jy] == 0:
grid_i_start[jx, jy] = j
else:
grid_i_next[grid_i_end[jx, jy]] = j
grid_i_end[jx, jy] = j
grid_count[jx, jy] += 1
try_count.append(num_tries)
num_tries = 0
done = True
else:
num_tries += 1
if num_tries >= max_trials:
n = j - 1
done = True
if num_tries >= max_trials:
break
if n < n_req:
x = x[0:n]
y = y[0:n]
return x, y, try_count
def gen_super_stations():
"""Generation 85 super-stations by rotation"""
# =========================================================================
num_super_stations = 85
num_stations_per_super_station = 6
max_tries_per_station = 5
diameter_gridgen = 40.0 # m
diameter = 35.0 # m
antenna_diameter = 1.5
num_ant_station_gridgen = 300
num_ant_station = 256
ss_diameter = 100.0
st_diameter = diameter
angles = numpy.arange(num_stations_per_super_station - 1) * \
(360.0 / float(num_stations_per_super_station - 1))
angles += 90.0
r0 = diameter
sx = r0 * numpy.cos(numpy.radians(angles))
sy = r0 * numpy.sin(numpy.radians(angles))
sx = numpy.insert(sx, 0, 0.0)
sy = numpy.insert(sy, 0, 0.0)
ss_model_dir = 'v4d_r_90m_180ant_ss_uniform.tm'
if os.path.isdir(ss_model_dir):
shutil.rmtree(ss_model_dir)
os.makedirs(ss_model_dir)
st_model_dir = 'v4d_r_90m_180ant_st_uniform.tm'
if os.path.isdir(st_model_dir):
shutil.rmtree(st_model_dir)
os.makedirs(st_model_dir)
ss_angles = -360.0 * numpy.random.random(num_super_stations) + 360.0
# =========================================================================
ss_ant_x = numpy.zeros((num_stations_per_super_station, num_ant_station))
ss_ant_y = numpy.zeros_like(ss_ant_x)
st_ant_x = numpy.zeros((num_stations_per_super_station, num_ant_station))
st_ant_y = numpy.zeros_like(st_ant_x)
ss_enu = numpy.zeros((num_ant_station * num_stations_per_super_station, 2))
st_enu = numpy.zeros((num_ant_station, 2))
# =========================================================================
circle = pyplot.Circle((0.0, 0.0), ss_diameter / 2.0,
color='r', linestyle='--',
fill=False, alpha=0.3, lw=2.0)
fig1 = pyplot.figure(figsize=(8, 8))
ax1 = fig1.add_subplot(111, aspect='equal')
ax1.set_xlabel('East [m]')
ax1.set_ylabel('North [m]')
ax1.grid()
ax1.set_xlim(-60, 60)
ax1.set_ylim(-60, 60)
line1, = ax1.plot([], [], 'k+')
label1 = ax1.text(0.02, 0.98, '', ha='left', va='top', style='italic',
color='k', transform=ax1.transAxes, fontsize='x-small')
circle = pyplot.Circle((0.0, 0.0), ss_diameter / 2.0,
color='r', linestyle='--',
fill=False, alpha=0.3, lw=2.0)
ax1.add_artist(circle)
fig2 = pyplot.figure(figsize=(8, 8))
ax2 = fig2.add_subplot(111, aspect='equal')
ax2.set_xlabel('East [m]')
ax2.set_ylabel('North [m]')
ax2.grid()
ax2.set_xlim(-60, 60)
ax2.set_ylim(-60, 60)
circle = pyplot.Circle((0.0, 0.0), ss_diameter / 2.0,
color='r', linestyle='--',
fill=False, alpha=0.3, lw=2.0)
ax2.add_artist(circle)
fig3 = pyplot.figure(figsize=(8, 8))
ax3 = fig3.add_subplot(111, aspect='equal')
ax3.set_xlabel('East [m]')
ax3.set_ylabel('North [m]')
ax3.grid()
ax3.set_xlim(-20, 20)
ax3.set_ylim(-20, 20)
line3, = ax3.plot([], [], 'k+')
label3 = ax3.text(0.02, 0.98, '', ha='left', va='top', style='italic',
color='k', transform=ax3.transAxes, fontsize='x-small')
circle = pyplot.Circle((0.0, 0.0), st_diameter / 2.0,
color='r', linestyle='--',
fill=False, alpha=0.3, lw=2.0)
ax3.add_artist(circle)
fig4 = pyplot.figure(figsize=(8, 8))
ax4 = fig4.add_subplot(111, aspect='equal')
ax4.set_xlabel('East [m]')
ax4.set_ylabel('North [m]')
ax4.grid()
ax4.set_xlim(-20, 20)
ax4.set_ylim(-20, 20)
circle = pyplot.Circle((0.0, 0.0), st_diameter / 2.0,
color='r', linestyle='--',
fill=False, alpha=0.3, lw=2.0)
ax4.add_artist(circle)
# =========================================================================
for i in range(num_super_stations):
print('== super station %i == : ' % i, end='')
for j in range(num_stations_per_super_station):
print('%i' % j, end='')
trial = 0
while trial < max_tries_per_station:
print('.', end='')
ax, ay, _ = gridgen(num_ant_station_gridgen, diameter_gridgen,
antenna_diameter, max_trials=10000)
if ax.shape[0] == num_ant_station_gridgen:
ar = (ax**2 + ay**2)**0.5
# Sort by radius
sort_idx = ar.argsort()
ax = ax[sort_idx]
ay = ay[sort_idx]
ax = ax[:num_ant_station]
ay = ay[:num_ant_station]
ss_ant_x[j, :] = ax + sx[j]
ss_ant_y[j, :] = ay + sy[j]
st_ant_x[j, :] = ax
st_ant_y[j, :] = ay
break
else:
trial += 1
continue
if trial == max_tries_per_station:
print()
print('Error, Failed to find enough antennas for station '
'%i/%i' % (ax.shape[0], num_ant_station_gridgen))
return
print()
# Rotate super-station
ss_ant_x, ss_ant_y = rotate_coords(ss_ant_x, ss_ant_y, ss_angles[i])
# Write station and super-station folders
station_dir = 'station%03i' % i
os.makedirs(join(ss_model_dir, station_dir))
ss_enu[:, 0] = ss_ant_x.flatten()
ss_enu[:, 1] = ss_ant_y.flatten()
station_file = join(ss_model_dir, station_dir, 'layout.txt')
numpy.savetxt(station_file, ss_enu, fmt='% -16.12f % -16.12f')
line1.set_data(ss_enu[:, 0], ss_enu[:, 1])
label1.set_text('super station %03i' % i)
fig1.savefig(join(ss_model_dir, 'station_%03i.png' % i))
ax2.plot(ss_enu[:, 0], ss_enu[:, 1], 'k+', alpha=0.1)
# Write station folders
for j in range(num_stations_per_super_station):
station_id = i * num_stations_per_super_station + j
station_dir = 'station%03i' % station_id
os.makedirs(join(st_model_dir, station_dir))
st_enu[:, 0] = st_ant_x[j, :].flatten()
st_enu[:, 1] = st_ant_y[j, :].flatten()
station_file = join(st_model_dir, station_dir, 'layout.txt')
numpy.savetxt(station_file, st_enu, fmt='% -16.12f % -16.12f')
# Plot station and add to station superposition
line3.set_data(st_enu[:, 0], st_enu[:, 1])
label3.set_text('station %03i' % station_id)
fig3.savefig(join(st_model_dir, 'station_%03i.png' % station_id))
ax4.plot(st_enu[:, 0], st_enu[:, 1], 'k+', alpha=0.1)
fig2.savefig(join(ss_model_dir, 'all_stations.png'))
fig4.savefig(join(st_model_dir, 'all_stations.png'))
ss_layout = numpy.zeros((num_super_stations, 3))
numpy.savetxt(join(ss_model_dir, 'layout.txt'), ss_layout,
fmt='%3.1f %3.1f %3.1f')
total_stations = num_super_stations * num_stations_per_super_station
st_layout = numpy.zeros((total_stations, 3))
numpy.savetxt(join(st_model_dir, 'layout.txt'), st_layout,
fmt='%3.1f %3.1f %3.1f')
if __name__ == '__main__':
gen_super_stations()
|
[
"benjamin.mort@oerc.ox.ac.uk"
] |
benjamin.mort@oerc.ox.ac.uk
|
4cd0d4c05c9d8088b4dad0adf66220da4e1665bf
|
a48a24d45b569f3ca11b1599009c11c161548edc
|
/src/models/schnorr_scheme_validator_model.py
|
3616865a61b8bc8cde78e08b2addfef9989d8f2f
|
[] |
no_license
|
lepesevichnikita/DSA
|
fdacd32ac8b10ed3f07b69bb458483d56642874a
|
b677755e825df8eb188a0453ff95d213abb17c8d
|
refs/heads/master
| 2020-05-22T13:31:49.891037
| 2019-05-26T14:37:17
| 2019-05-26T14:37:17
| 186,361,635
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,394
|
py
|
from PyQt5.QtCore import pyqtProperty, pyqtSignal, pyqtSlot, QObject
from src.schnorr_scheme import SchnorrSchemeValidator
class SchnorrSchemeValidatorModel(QObject):
complexityChanged = pyqtSignal()
sChanged = pyqtSignal()
eChanged = pyqtSignal()
isValidChanged = pyqtSignal()
xChanged = pyqtSignal()
publicKeyChanged = pyqtSignal()
def __init__(self, parent: QObject = None):
super().__init__(parent)
self._schnorr_scheme_validator = SchnorrSchemeValidator()
self.xChanged.connect(self.gen_e)
self.xChanged.connect(self.isValidChanged)
self.sChanged.connect(self.isValidChanged)
@pyqtProperty(int, notify=complexityChanged)
def complexity(self) -> int:
return self._schnorr_scheme_validator.complexity
@complexity.setter
def complexity(self, complexity: int):
self._schnorr_scheme_validator.complexity = complexity
self.complexityChanged.emit()
@pyqtProperty(str, notify=sChanged)
def s(self) -> str:
return hex(self._schnorr_scheme_validator.s)
@s.setter
def s(self, s: str):
self._schnorr_scheme_validator.s = int(s, 16)
self.sChanged.emit()
@pyqtProperty(str, notify=eChanged)
def e(self) -> str:
return hex(self._schnorr_scheme_validator.e)
@pyqtSlot()
def init(self, x: str):
        self.x = x  # the x setter parses the hex string, so pass it through unchanged
self.gen_e()
@pyqtSlot()
def gen_e(self):
prev_e = self._schnorr_scheme_validator.e
self._schnorr_scheme_validator.gen_e()
if prev_e != self._schnorr_scheme_validator.e:
self.eChanged.emit()
@pyqtProperty(str, notify=xChanged)
def x(self) -> str:
return hex(self._schnorr_scheme_validator.x)
@x.setter
def x(self, x: str):
self._schnorr_scheme_validator.x = int(x, 16)
self.xChanged.emit()
@pyqtProperty(list, notify=publicKeyChanged)
def keys(self) -> list:
return self._schnorr_scheme_validator.keys
@keys.setter
def keys(self, keys: list):
self._schnorr_scheme_validator.keys = keys
self.publicKeyChanged.emit()
@pyqtProperty(bool, notify=isValidChanged)
def is_valid(self) -> bool:
return self._schnorr_scheme_validator.is_valid
|
[
"lepesevich.nikita@yandex.com"
] |
lepesevich.nikita@yandex.com
|
0ad106e142946f1ade9edc6cfbe1e35446f4d07d
|
236141f0ece71073d0452ae5d0436a44843d16e7
|
/movieproject/movieapp/migrations/0002_movie_img.py
|
39fd6ffd551c26e58c47d6b5d41299e384db6337
|
[] |
no_license
|
Nikhil7736/movie
|
6437098c8c1202ada6ed0889c623b6bec9202fd5
|
e9cc8db9b84c89fd004d20da42608d22bc113efb
|
refs/heads/master
| 2023-07-28T16:13:53.895581
| 2021-09-11T11:10:29
| 2021-09-11T11:10:29
| 405,355,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
# Generated by Django 3.2.6 on 2021-08-27 05:19
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('movieapp', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='movie',
name='img',
field=models.ImageField(default=django.utils.timezone.now, upload_to='gallery'),
preserve_default=False,
),
]
|
[
"nikhilprasad61@gmail.com"
] |
nikhilprasad61@gmail.com
|
007b9d008b4040510b20cb5edf5341a7b17b6cfe
|
77f10c4d240fe9cc18874459509352b25db227f2
|
/methodclass/c3.py
|
c392cb7958883b2052368f8a791489ecce58855d
|
[] |
no_license
|
bhchen-0914/PythonCourse
|
f776c66ed3bbb564c3040c0060da3eec069e70df
|
b7a536ab0b3196af7d97245d9a12f90c3ee7df03
|
refs/heads/master
| 2023-05-24T05:27:18.848494
| 2021-06-19T09:51:52
| 2021-06-19T09:51:52
| 378,376,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 95
|
py
|
"""
Used to test the initialization behavior of __init__.py
"""
def init_test():
print('this is c3 file')
|
[
"364293032@qq.com"
] |
364293032@qq.com
|
d5bf55cd95af3580caec5f9c2ec2353f31c95a68
|
616ca750df6e31054c1ad0fa1121555f36186965
|
/two pointers/validate_stack_sequences.py
|
619466b7dd9a557ca103fa2e936f4155cb5e859d
|
[] |
no_license
|
asset311/leetcode
|
d4cdc0af1967f08868c9df566353e324a606efbd
|
973ec60b9aea3522ca54fd0375505e4ad274ed3d
|
refs/heads/master
| 2022-12-14T02:37:36.764472
| 2020-08-27T16:56:18
| 2020-08-27T16:56:18
| 271,077,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 825
|
py
|
'''
946. Validate Stack Sequences
https://leetcode.com/problems/validate-stack-sequences/
'''
from typing import List
class Solution:
def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:
# use two pointers
stack = []
# second pointer traversing 'popped' array
pop_pointer = 0
# first pointer is traversing 'pushed' array
for num in pushed:
            # push an element at each iteration
stack.append(num)
# try to pop from the stack using 'popped' array
            while stack and pop_pointer < len(popped) and stack[-1] == popped[pop_pointer]:
stack.pop()
pop_pointer += 1
        # need to make sure that we exhausted the pop_pointer
return pop_pointer == len(popped)
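
# Illustrative checks, using the examples from the LeetCode problem statement:
#   Solution().validateStackSequences([1, 2, 3, 4, 5], [4, 5, 3, 2, 1])  # -> True
#   Solution().validateStackSequences([1, 2, 3, 4, 5], [4, 3, 5, 1, 2])  # -> False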
|
[
"32253546+Aseka311@users.noreply.github.com"
] |
32253546+Aseka311@users.noreply.github.com
|
3d94a4dfc9debe0d5e6bcdb0e806f2c379a11a70
|
294764af60107e4fd1d2f9c8b3c03b24dcfe6ee6
|
/items.py
|
423f726bf5d4eb1bc930b727aa994b755b63b21b
|
[] |
no_license
|
josephcorbin91/architectureDaily
|
e44b7c45f96f9438c707b7a6fbf776ec05f34268
|
140d45779782b39b6e9615807d69bfe18009ef62
|
refs/heads/master
| 2021-01-17T16:55:37.280514
| 2016-11-01T17:22:51
| 2016-11-01T17:22:51
| 69,365,110
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 729
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ArchitecturedailyItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
id = scrapy.Field()
title = scrapy.Field()
city = scrapy.Field()
description = scrapy.Field()
video = scrapy.Field()
image = scrapy.Field()
link = scrapy.Field()
date = scrapy.Field()
like = scrapy.Field()
class ThreadItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
link = scrapy.Field()
threadDescription = scrapy.Field()
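
# Illustrative use inside a spider callback (hypothetical selectors, not part
# of the original project):
#   item = ArchitecturedailyItem()
#   item['title'] = response.css('h1::text').get()
#   item['link'] = response.url
#   yield item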
|
[
"noreply@github.com"
] |
josephcorbin91.noreply@github.com
|
8f0e75f7605fa4adae8aa901808c5e6ca7ee11d3
|
ff8bfdea7cf7fb3201c2e5ccca308a660193dfe7
|
/DecoraterClasses/Employer_Plan_decorator.py
|
dede9b487f81a8a5f69a57ffa8157349265b718a
|
[
"MIT"
] |
permissive
|
rohitgs28/FindMyEmployer
|
8d42e8fcde8c3ecc4654b37006f0feab045a75cf
|
d4b369eb488f44e40ef371ac09847f8ccc39994c
|
refs/heads/master
| 2022-12-10T17:19:16.953014
| 2018-09-07T06:44:38
| 2018-09-07T06:44:38
| 147,781,155
| 0
| 0
|
MIT
| 2022-12-08T02:23:39
| 2018-09-07T06:33:59
|
Python
|
UTF-8
|
Python
| false
| false
| 491
|
py
|
from EmployerDecorator import EmployerDecorator
class Employer_Plan_decorator(EmployerDecorator):
def __init__(self, employer):
super(Employer_Plan_decorator, self).__init__(employer)
    def plan_rules(self, givenCount, userCount, messagePermission):
        givenCount = int(givenCount)
        userCount = int(userCount)
        # The plan permits the action only while usage stays below the limit.
        allow = givenCount > userCount
        return allow, messagePermission
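
# Illustrative call (assuming `employer` is any object accepted by the
# repository's EmployerDecorator base class):
#   plan = Employer_Plan_decorator(employer)
#   allowed, note = plan.plan_rules('10', '3', 'messaging allowed')
#   # allowed == True, because the plan limit (10) exceeds current usage (3)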
|
[
"rohit.gs28@gmail.com"
] |
rohit.gs28@gmail.com
|
f800d6b3ca316df0db0ffe4717caaddae33260f8
|
3ea684487ef727fb2f8d16a030769f32a4f4003a
|
/datahq/apps/receiver/bootstrap.py
|
90c3fa6dc99fc38cd04840c76b173a531f02f9b5
|
[] |
no_license
|
adewinter/data-hq
|
5781e6669e0625ea9ae7cf94ec77c528485c2951
|
ca03656c835f8caa5156326500c05bb83ab931ca
|
refs/heads/master
| 2021-01-18T12:48:26.584454
| 2010-08-19T13:15:03
| 2010-08-19T13:15:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
import os
from django.conf import settings
# make our directories if they're not there
for path in [settings.RECEIVER_SUBMISSION_PATH,
             settings.RECEIVER_ATTACHMENT_PATH,
             settings.RECEIVER_EXPORT_PATH]:
    if not os.path.isdir(path):
        os.mkdir(path)
|
[
"czue@dimagi.com"
] |
czue@dimagi.com
|
75a3cd2e9f625d1e43a53e0412340d4ddac9a76a
|
9923e30eb99716bfc179ba2bb789dcddc28f45e6
|
/swagger-codegen/python/test/test_asset.py
|
4d4822c956810304a52b9be4b031c37a4dfaaa89
|
[] |
no_license
|
silverspace/samsara-sdks
|
cefcd61458ed3c3753ac5e6bf767229dd8df9485
|
c054b91e488ab4266f3b3874e9b8e1c9e2d4d5fa
|
refs/heads/master
| 2020-04-25T13:16:59.137551
| 2019-03-01T05:49:05
| 2019-03-01T05:49:05
| 172,804,041
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,361
|
py
|
# coding: utf-8
"""
Samsara API
# Introduction Samsara provides API endpoints for interacting with Samsara Cloud, so that you can build powerful applications and custom solutions with sensor data. Samsara has endpoints available to track and analyze sensors, vehicles, and entire fleets. The Samsara Cloud API is a [RESTful API](https://en.wikipedia.org/wiki/Representational_state_transfer) accessed by an [HTTP](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol) client such as wget or curl, or HTTP libraries of most modern programming languages including python, ruby, java. We use built-in HTTP features, like HTTP authentication and HTTP verbs, which are understood by off-the-shelf HTTP clients. We allow you to interact securely with our API from a client-side web application (though you should never expose your secret API key). [JSON](http://www.json.org/) is returned by all API responses, including errors. If you’re familiar with what you can build with a REST API, the following API reference guide will be your go-to resource. API access to the Samsara cloud is available to all Samsara administrators. To start developing with Samsara APIs you will need to [obtain your API keys](#section/Authentication) to authenticate your API requests. If you have any questions you can reach out to us on [support@samsara.com](mailto:support@samsara.com) # Endpoints All our APIs can be accessed through HTTP requests to URLs like: ```curl https://api.samsara.com/<version>/<endpoint> ``` All our APIs are [versioned](#section/Versioning). If we intend to make breaking changes to an API which either changes the response format or request parameter, we will increment the version. # Authentication To authenticate your API request you will need to include your secret token. You can manage your API tokens in the [Dashboard](https://cloud.samsara.com). They are visible under `Settings->Organization->API Tokens`. Your API tokens carry many privileges, so be sure to keep them secure. Do not share your secret API tokens in publicly accessible areas such as GitHub, client-side code, and so on. Authentication to the API is performed via [HTTP Basic Auth](https://en.wikipedia.org/wiki/Basic_access_authentication). Provide your API token as the basic access_token value in the URL. You do not need to provide a password. ```curl https://api.samsara.com/<version>/<endpoint>?access_token={access_token} ``` All API requests must be made over [HTTPS](https://en.wikipedia.org/wiki/HTTPS). Calls made over plain HTTP or without authentication will fail. # Request Methods Our API endpoints use [HTTP request methods](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods) to specify the desired operation to be performed. The documentation below specified request method supported by each endpoint and the resulting action. ## GET GET requests are typically used for fetching data (like data for a particular driver). ## POST POST requests are typically used for creating or updating a record (like adding new tags to the system). With that being said, a few of our POST requests can be used for fetching data (like current location data of your fleet). ## PUT PUT requests are typically used for updating an existing record (like updating all devices associated with a particular tag). ## PATCH PATCH requests are typically used for modifying an existing record (like modifying a few devices associated with a particular tag). ## DELETE DELETE requests are used for deleting a record (like deleting a tag from the system). 
# Response Codes All API requests will respond with appropriate [HTTP status code](https://en.wikipedia.org/wiki/List_of_HTTP_status_codes). Your API client should handle each response class differently. ## 2XX These are successful responses and indicate that the API request returned the expected response. ## 4XX These indicate that there was a problem with the request like a missing parameter or invalid values. Check the response for specific [error details](#section/Error-Responses). Requests that respond with a 4XX status code, should be modified before retrying. ## 5XX These indicate server errors when the server is unreachable or is misconfigured. In this case, you should retry the API request after some delay. # Error Responses In case of a 4XX status code, the body of the response will contain information to briefly explain the error reported. To help debugging the error, you can refer to the following table for understanding the error message. | Status Code | Message | Description | |-------------|----------------|-------------------------------------------------------------------| | 401 | Invalid token | The API token is invalid and could not be authenticated. Please refer to the [authentication section](#section/Authentication). | | 404 | Page not found | The API endpoint being accessed is invalid. | | 400 | Bad request | Default response for an invalid request. Please check the request to make sure it follows the format specified in the documentation. | # Versioning All our APIs are versioned. Our current API version is `v1` and we are continuously working on improving it further and provide additional endpoints. If we intend to make breaking changes to an API which either changes the response format or request parameter, we will increment the version. Thus, you can use our current API version worry free. # FAQs Check out our [responses to FAQs here](https://kb.samsara.com/hc/en-us/sections/360000538054-APIs). Don’t see an answer to your question? Reach out to us on [support@samsara.com](mailto:support@samsara.com). # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import samsara
from samsara.models.asset import Asset # noqa: E501
from samsara.rest import ApiException
class TestAsset(unittest.TestCase):
"""Asset unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAsset(self):
"""Test Asset"""
# FIXME: construct object with mandatory attributes with example values
# model = samsara.models.asset.Asset() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"greg@samsara.com"
] |
greg@samsara.com
|
dfae9c8d976ac7ac0f418b942f53ee151796d454
|
58ec0c399df3820c4662e45f72b1722e73d5999a
|
/lambda_function.py
|
319eba49f621172513e29f7652648872967276f4
|
[
"Apache-2.0"
] |
permissive
|
anjilinux/lambda-refarch-iotbackend
|
7e644743293a97520c5c33ae636523b2dce0fa4a
|
5fc6ec68380bbe5e1f294c19482971fba6de479b
|
refs/heads/master
| 2023-03-15T19:56:19.542814
| 2020-08-28T00:58:04
| 2020-08-28T00:58:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
import boto3
import os
sns = boto3.client('sns')
alertsns=os.environ['alert_sns']
def lambda_handler(event, context):
print(event)
deviceName=event['name']
humidity=event['humidity']
print("send alert")
alertmessage="Humidity "+ str(humidity) + " under threshold for device name "+ deviceName
snsresponse=sns.publish(TopicArn=alertsns,
Message=alertmessage,
Subject="Attention:Humidity Under Threshold")
return ''
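
# Illustrative test event (field names taken from the handler above; the
# device name and value are made up):
#   {"name": "greenhouse-sensor-1", "humidity": 18}
# This would publish "Humidity 18 under threshold for device name
# greenhouse-sensor-1" to the SNS topic named by the alert_sns environment
# variable.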
|
[
"rajdsah@38f9d33c705d.ant.amazon.com"
] |
rajdsah@38f9d33c705d.ant.amazon.com
|
9cea75b600ef874d8dec1fc2c24dbca872f750d9
|
213c7de871594e436c1997078d41388adf6bbac5
|
/SortingAlg/plugins/Scripts/pip3.7-script.py
|
324bfba37e550ac93166c6231a6e3a5b6f24b42e
|
[] |
no_license
|
LennartPaciner/python-stuff
|
d8657c4f0904f5326c023adc23395d6123a7241f
|
554755ad96dfcae223849e7f29f65dfa4f5c1625
|
refs/heads/master
| 2021-06-24T09:06:19.009408
| 2021-05-29T09:31:37
| 2021-05-29T09:31:37
| 217,899,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
#!C:\Users\wombo\github-privat\python-stuff\SortingAlg\plugins\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
|
[
"lpaciner@students.uni-mainz.de"
] |
lpaciner@students.uni-mainz.de
|
a0980b9644baf082aecd80772f09b050037a45e1
|
323bddba2caa2d1c0dc5c91e0de046a2d7f4a5f8
|
/src/vdb_to_numpy/utils/mesher.py
|
79108e9c228c22d1a5e6366b97c8ff2cc72c6157
|
[
"MIT"
] |
permissive
|
PRBonn/vdb_to_numpy
|
00b9206f64cbc545636c6a5307e7557bd798b675
|
d759b8cd26d5cba2cbdcc1b8261b7d918d9113cc
|
refs/heads/main
| 2023-07-05T20:37:02.075071
| 2023-01-02T06:37:34
| 2023-01-02T06:37:34
| 480,339,860
| 5
| 4
|
MIT
| 2023-06-29T12:49:48
| 2022-04-11T10:55:22
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,084
|
py
|
import numpy as np
import open3d as o3d
from skimage.measure import marching_cubes
def extract_mesh(volume, mask=None):
"""Run marching_cubes and extract a triangular mesh of the volume.
Parameters - copied from skimage -
----------
volume : (M, N, P) array
        Input data volume to find isosurfaces. Will internally be
        converted to float32 if necessary.
    mask : (M, N, P) array
        Boolean array. The marching cube algorithm will be computed only on
        True elements. This will save computational time when interfaces
        are located within certain region of the volume M, N, P - e.g. the top
        half of the cube - and also allow to compute finite surfaces - i.e. open
        surfaces that do not end at the border of the cube.
"""
vertices, faces, _, _ = marching_cubes(volume, level=0, mask=mask)
vertices = o3d.utility.Vector3dVector(vertices)
triangles = o3d.utility.Vector3iVector(faces)
mesh = o3d.geometry.TriangleMesh(vertices=vertices, triangles=triangles)
mesh.compute_vertex_normals()
return mesh
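
# Minimal usage sketch (illustrative; relies only on the imports above):
# mesh the zero level set of a sphere signed-distance field.
#
#   x, y, z = np.mgrid[-1:1:64j, -1:1:64j, -1:1:64j]
#   sdf = np.sqrt(x**2 + y**2 + z**2) - 0.5   # negative inside the sphere
#   sphere_mesh = extract_mesh(sdf.astype(np.float32))
#   o3d.visualization.draw_geometries([sphere_mesh])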
|
[
"ignaciovizzo@gmail.com"
] |
ignaciovizzo@gmail.com
|
19cc849f50ba984019a615ec3532eb04f622db66
|
3efee0cf2bd9e0c34bfdd94ab24a15cb88c04509
|
/PWEM_examples/kxky_bandstructure_benchmark_plotting_with_fdfd.py
|
20872b8c10843e1edc1184e3d3cbe5d7ee1b70bd
|
[
"MIT"
] |
permissive
|
luwl85/Rigorous-Coupled-Wave-Analysis
|
bf5016ec70525f5e7bf59dfa93a03902afdfac12
|
a28fdf90b5b5fc0fedacc8bb44a0a0c2f2a02143
|
refs/heads/master
| 2023-04-25T20:46:45.397976
| 2021-05-20T22:17:54
| 2021-05-20T22:17:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 800
|
py
|
import sys
import os
import scipy.io
import matplotlib.pyplot as plt
import numpy as np
import plotly
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from mpl_toolkits.mplot3d import Axes3D
matlab_data = os.path.join('kxky_photonic_circle_bandstructure.mat')
mat = scipy.io.loadmat(matlab_data)
print(mat.keys())

wvlen_scan = np.squeeze(mat['wvlen_scan'])
omega_scan = 1 / wvlen_scan
ky_spectra = np.squeeze(mat['ky_spectra'])
print(ky_spectra.shape)

ky_scan = np.linspace(-np.pi, np.pi, 400)
X, Y = np.meshgrid(omega_scan, ky_scan)
print(X.shape)

# first dimension is ky, second dimension is kx
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X, Y, np.real(ky_spectra[:, :, 0]), marker='.')
plt.show()
|
[
"nzz2102@stanford.edu"
] |
nzz2102@stanford.edu
|
8eeda9eafbd385126da3185c7429ce42e6a1a598
|
3a2921780f62433e6c486cf59dab3fa9ba3f8a52
|
/tethys/startup.py
|
406e75fb67f2e9d8c2130ed51a3ed76ef7a1da47
|
[
"MIT"
] |
permissive
|
JosePedroMatos/Tethys
|
8295a7fa27a5c93291765e16593acc5c1bd49574
|
e4bedfb9051f2f7fb665b4aef856a6b1660521d4
|
refs/heads/master
| 2021-01-11T16:08:23.887999
| 2017-01-25T15:50:51
| 2017-01-25T15:50:51
| 80,015,953
| 2
| 0
| null | 2017-01-25T15:50:52
| 2017-01-25T13:16:59
|
Python
|
UTF-8
|
Python
| false
| false
| 6,084
|
py
|
'''
Created on 29 Oct 2016
Responsible for checking and including default database entries into Tethys.
'''
import os
from Crypto.Random import random
from timeSeries.models import DataProvider, DataType, Colormap
from django.contrib.auth.models import User
from django.core.files import File
def prepareTethys():
try:
staticDir = os.path.dirname(os.path.abspath(__file__))
# Add tethys user (no special privileges)
tmp = User.objects.filter(username='tethys')
if len(tmp)==0:
user = User.objects.create_user('tethys', 'tethys@some.url', 'SomePassword' + str(random.getrandbits(256)))
user.save()
else:
user = tmp[0]
# Add satellite aggregation data type
tmp = DataType.objects.filter(abbreviation='Sat', name='Satellite data aggregation')
if len(tmp)==0:
with open(os.path.normpath(os.path.join(staticDir, '..', 'extra', 'icons', 'satellite.png')), 'rb') as f:
dataType = DataType(abbreviation='Sat',
name='Satellite data aggregation',
units='undefined',
description='Automatic data type created to define the sum of satellite data.',
observations='Units for this data type are not defined. They depend on the units of each specific satellite data product.',
introducedBy=user,
)
dataType.icon.save('satellite.png', File(f), save=True)
dataType.save()
# Add local data provider
tmp = DataProvider.objects.filter(name='Tethys')
if len(tmp)==0:
with open(os.path.normpath(os.path.join(staticDir, '..', 'extra', 'icons', 'tethys.png')), 'rb') as f:
dataProvider = DataProvider(abbreviation='Tethys',
name='Tethys',
description='Automatic data provider created to define series produced locally.',
email='tethys@some.url',
introducedBy=user,
)
dataProvider.icon.save('tethys.png', File(f), save=True)
dataProvider.save()
# Add discharge data type
tmp = DataType.objects.filter(abbreviation='Q', name='Discharge')
if len(tmp)==0:
with open(os.path.normpath(os.path.join(staticDir, '..', 'extra', 'icons', 'river.png')), 'rb') as f:
dataType = DataType(abbreviation='Q',
name='Discharge',
units='m3/s',
description='River discharge in m3/s',
observations=None,
introducedBy=user,
)
dataType.icon.save('discharge.png', File(f), save=True)
dataType.save()
# Add water level data type
tmp = DataType.objects.filter(abbreviation='h', name='Water level')
if len(tmp)==0:
with open(os.path.normpath(os.path.join(staticDir, '..', 'extra', 'icons', 'waterLevel.png')), 'rb') as f:
dataType = DataType(abbreviation='h',
name='Water Level',
units='m',
description='Water level in m (relative to the talweg)',
observations=None,
introducedBy=user,
)
dataType.icon.save('waterLevel.png', File(f), save=True)
dataType.save()
# Add colormap parula
tmp = Colormap.objects.filter(name='parula')
if len(tmp)==0:
with open(os.path.normpath(os.path.join(staticDir, '..', 'extra', 'icons', 'colormap_parula.png')), 'rb') as f:
colormap = Colormap(name='parula',
introducedBy=user,
)
colormap.file.save('parula.png', File(f), save=True)
colormap.save()
# Add colormap jet
tmp = Colormap.objects.filter(name='jet')
if len(tmp)==0:
with open(os.path.normpath(os.path.join(staticDir, '..', 'extra', 'icons', 'colormap_jet.png')), 'rb') as f:
colormap = Colormap(name='jet',
introducedBy=user,
)
colormap.file.save('colormap_jet.png', File(f), save=True)
colormap.save()
# Add colormap hot
tmp = Colormap.objects.filter(name='hot')
if len(tmp)==0:
with open(os.path.normpath(os.path.join(staticDir, '..', 'extra', 'icons', 'colormap_hot.png')), 'rb') as f:
colormap = Colormap(name='hot',
introducedBy=user,
)
colormap.file.save('colormap_hot.png', File(f), save=True)
colormap.save()
# Add colormap hsv
tmp = Colormap.objects.filter(name='hsv')
if len(tmp)==0:
with open(os.path.normpath(os.path.join(staticDir, '..', 'extra', 'icons', 'colormap_hsv.png')), 'rb') as f:
colormap = Colormap(name='hsv',
introducedBy=user,
)
colormap.file.save('colormap_hsv.png', File(f), save=True)
colormap.save()
except Exception as ex:
print(str(ex))
|
[
"jpgscm@gmail.com"
] |
jpgscm@gmail.com
|
ef0ef5c0ce92555256715da547ac40faeea34fb3
|
8a519b5e5cc1a03471dfe7bc016f993f40b1a585
|
/MAY13_LCS.py~
|
d4f0ddb599cc75421ee6d2625a1d2fa5f3823fdd
|
[] |
no_license
|
saikiranboga/coding-scripts
|
7c12a8f0d3303de74eeb6cebaf45496f8699253b
|
c95c8da731897521555d29e8bc14848aa22d8e4f
|
refs/heads/master
| 2020-12-24T15:22:24.752105
| 2013-07-04T03:58:39
| 2013-07-04T03:58:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 45
|
#!/usr/bin/python
if __name__ == "__main__":
    pass
|
[
"saikiranboga110792@gmail.com"
] |
saikiranboga110792@gmail.com
|
|
921f5f114fdee39bcc021e670ff6794d79a144dc
|
94e134baa379584521ccaa8a634f36b662cad382
|
/recursos/migrations/0005_auto_20210127_1519.py
|
e3aefbbc0b4882e90151fafa54a10d6222546ee4
|
[] |
no_license
|
alvaromateus/monitor_recursos
|
cb142d2bf2cb1c55e3481efb89b8f803d149489d
|
94de62193cb652d62297686d6e312ce80e3cc07b
|
refs/heads/master
| 2023-02-24T14:35:29.115530
| 2021-01-27T22:55:15
| 2021-01-27T22:55:15
| 329,765,939
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
# Generated by Django 3.1.5 on 2021-01-27 15:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recursos', '0004_auto_20210127_1449'),
]
operations = [
migrations.AlterField(
model_name='registro',
name='clock_processador',
field=models.CharField(max_length=255, null=True),
),
]
|
[
"alvaromateus@gmail.com"
] |
alvaromateus@gmail.com
|
d56f61c624424aa16dbda193d9b44aa754b4c235
|
5e4b8f263c07694c05bbc0e4fbaeb3f9f479d40e
|
/lab-bmplib/lab-bmplib/compile
|
5307157d40cf8720dc3aca1c793d132c9dfc2dad
|
[] |
no_license
|
BrianHerron-exe/CS103
|
2a2391c70e18989ec6dd8c579d30a6187de1b6eb
|
8bc0f2dff053531e1869938ce9aefd01d8d0bafa
|
refs/heads/master
| 2021-06-15T21:51:23.810121
| 2017-04-05T04:27:17
| 2017-04-05T04:27:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,245
|
#!/usr/bin/python
#-*- mode: python -*-
"""
Usage:
compile foo # runs clang++ <options> foo.cpp -o foo
compile foo.cpp # same as previous
compile foo.cpp bar.cpp baz.o -o bar # clang++ <options> <args>
For installation instructions, see
http://bits.usc.edu/cs103/compile/
Contact: dpritcha@usc.edu
"""
def compile_convert(args, warn = True):
if len(args) == 1:
arg = args[0]
if arg.endswith(".cpp"):
arg = arg[:-4]
args = [arg+".cpp", "-o", arg]
prefix = ["clang++", "-g"]
if warn:
prefix += ["-Wall", "-Wvla", "-Wshadow", "-Wunreachable-code",
"-Wconversion",
"-Wno-shorten-64-to-32", "-Wno-sign-conversion",
"-Wno-sign-compare", "-Wno-write-strings"]
# this was going to be added Fall 2015 but is probably too experimental
# "-fsanitize=undefined"
# it's worth considering -fsanitize=address or -fsanitize=memory too
# except that they are not compatible with valgrind :(
return prefix + args
if __name__ == "__main__":
import os, sys
if len(sys.argv) <= 1:
print("Error in 'compile': no arguments given")
else:
os.system(" ".join(compile_convert(sys.argv[1:])))
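
# Worked example of the argument rewriting (with warnings enabled):
#   compile_convert(["foo"]) ->
#     ["clang++", "-g", "-Wall", "-Wvla", "-Wshadow", "-Wunreachable-code",
#      "-Wconversion", "-Wno-shorten-64-to-32", "-Wno-sign-conversion",
#      "-Wno-sign-compare", "-Wno-write-strings", "foo.cpp", "-o", "foo"]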
|
[
"bellathu@usc.edu"
] |
bellathu@usc.edu
|
|
4aea8cc4695fedfd388451908d4491da7819fc1a
|
f95102d0d572780bdafbbf309e181aa2107c5d82
|
/q.py
|
d442a34f09dc3599d523110e480f719406a61812
|
[] |
no_license
|
auimendoza/cs7641-omscs-a4
|
02982111367476f987880435b4708f5b2d14617a
|
6b43070c655f4ea876c519270aec90331018a164
|
refs/heads/master
| 2020-04-07T11:31:19.195803
| 2018-11-20T04:20:10
| 2018-11-20T04:20:10
| 157,265,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,390
|
py
|
import gym
import numpy as np
from mdptoolbox.mdp import ValueIteration
from mdplib import PolicyIteration, QLearning
import common
import sys
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
def Usage():
print("Usage:")
print("python %s <envid> <exploreinterval>" % (sys.argv[0]))
sys.exit(1)
if len(sys.argv) != 3:
Usage()
mdpid = "q"
mdpname = "Q Learning"
envid = int(sys.argv[1])
exploreinterval = int(sys.argv[2])
maxiter = 2000000
interval = 250000
gammas = [0.1, 0.3, 0.5, 0.7, 0.9]
episodes = 1000
alphas = [0.1, 0.3, 0.5, 0.7, 0.9]
env, envname, P, R, actions, actions2 = common.getEnv(envid)
print("* %s *" % (envname))
print(mdpname)
aiters = []
aghls = []
ats = []
amd = []
bestgoal = 0
bestpolicy = None
bestpolicyparams = {}
print("Running ...")
for alpha in alphas:
iters = []
ghls = []
ts = []
md = []
for gamma in gammas:
#print("gamma: %.1f, alpha: %s" % (gamma, str(alpha)))
sys.stdout.write('.')
func = QLearning(P, R, gamma, maxiter, interval, alpha, exploreinterval)
func.run()
ighl = []
its = []
for i, policy in enumerate(func.policies):
timesteps, gtimesteps, ghl = common.runPolicy(env, episodes, policy)
if ghl[0] > bestgoal:
bestgoal = ghl[0]
bestpolicy = policy
bestpolicyparams['gamma'] = gamma
bestpolicyparams['alpha'] = alpha
bestpolicyparams['iterations'] = func.iterations[i]
bestpolicyparams['elapsedtime'] = func.elapsedtimes[i]
bestpolicyparams['meangtimesteps'] = np.mean(gtimesteps)
ighl.append(ghl)
its.append(np.mean(timesteps))
iters.append(func.iterations)
ghls.append(ighl)
ts.append(its)
md.append(func.mean_discrepancy)
aiters.append(iters)
aghls.append(ghls)
ats.append(ts)
amd.append(md)
# plot best policy
textsize = 12
if envid == 0:
textsize = 20
print("== best policy ==")
print("explore interval = %d" % (exploreinterval))
print("goals = %d" % (bestgoal))
print("gamma = %.1f" % (bestpolicyparams['gamma']))
print("alpha = %.1f" % (bestpolicyparams['alpha']))
print("iterations = %d" % (bestpolicyparams['iterations']))
print("elapsed time = %.3f" % (bestpolicyparams['elapsedtime']))
print("mean timesteps to goal = %.3f" % (bestpolicyparams['meangtimesteps']))
print("=================")
common.printNicePolicy(env, bestpolicy, actions2, textsize, "%s: Best Policy\n%s" % (mdpname, envname), "%d-%s-bestpolicy.png" % (envid, mdpid))
print(bestpolicy)
# plot iterations, params vs goal/discrepancy
iterations = aiters[0][0]
ghls = []
aghl = np.array(aghls)
for i in range(aghl.shape[0]):
for j in range(aghl.shape[1]):
for k in range(aghl.shape[2]):
ghls.append([alphas[i], gammas[j], iterations[k], aghl[i,j,k,0], aghl[i,j,k,1], aghl[i,j,k,2], np.array(amd)[i,j,k], np.array(ats)[i,j,k]])
ghlpd = pd.DataFrame(ghls, columns=['alpha', 'gamma', 'i', 'goal', 'hole', 'lost', 'md', 'ts'])
ghlpd['param'] = ghlpd.apply(lambda row: 'alpha=' + str(row['alpha']) + ', gamma=' + str(row['gamma']), axis=1)
g = sns.FacetGrid(ghlpd, col="alpha", hue="gamma", col_wrap=3, legend_out=False)
g = g.map(plt.plot, "i", "goal", marker=".")
g.add_legend()
g.set_xlabels('iterations')
g.set_ylabels('goals')
g.fig.subplots_adjust(top=0.7)
g.fig.suptitle('Goals After %d Episodes\n%s' % (episodes, envname))
g.set_xticklabels(rotation=90)
plt.gcf()
plt.savefig("%d-%s-%d-goal-it.png" % (envid, mdpid, exploreinterval), bbox_inches="tight")
plt.close()
g = sns.FacetGrid(ghlpd, col="alpha", hue="gamma", col_wrap=3, legend_out=False)
g = g.map(plt.plot, "i", "md", marker=".")
g.add_legend()
g.set_xlabels('iterations')
g.set_ylabels('Q mean discrepancy')
g.fig.subplots_adjust(top=0.7)
g.fig.suptitle('Q Mean Discrepancy\n%s' % (envname))
g.set_xticklabels(rotation=90)
plt.gcf()
plt.savefig("%d-%s-%d-md-it.png" % (envid, mdpid, exploreinterval), bbox_inches="tight")
plt.close()
g = sns.FacetGrid(ghlpd, col="alpha", hue="gamma", col_wrap=3, legend_out=False)
g = g.map(plt.plot, "i", "ts", marker=".")
g.add_legend()
g.set_xlabels('iterations')
g.set_ylabels('timesteps')
g.fig.subplots_adjust(top=0.7)
g.fig.suptitle('Timesteps to Goal or Hole\n%s' % (envname))
g.set_xticklabels(rotation=90)
g.set(ylim=(0,200))
plt.gcf()
plt.savefig("%d-%s-%d-ts-it.png" % (envid, mdpid, exploreinterval), bbox_inches="tight")
plt.close()
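
# Example invocation, matching Usage() above (argument values are illustrative):
#   python q.py 1 250000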
|
[
"auimendoza@gmail.com"
] |
auimendoza@gmail.com
|
bbab4019507f7f9140e39b56dbd67484ab09ce33
|
ce0fb1feddf3178426ee130a0fb2150131858fc0
|
/src/preprocess.py
|
fcf546d3fcb6d43e98dad0817c90f3c037ad1c46
|
[] |
no_license
|
carsonzchen/MSiA423_ACE.AI
|
d6b466ec81b468c8dc7e204d2d45814945ed124b
|
ac381613e8f978a74ca4c04d7ff5add0e3a3c740
|
refs/heads/master
| 2020-05-07T21:37:00.605451
| 2019-06-13T04:48:44
| 2019-06-13T04:48:44
| 180,911,498
| 1
| 1
| null | 2019-05-23T05:57:35
| 2019-04-12T02:04:56
|
Python
|
UTF-8
|
Python
| false
| false
| 8,058
|
py
|
import numpy as np
import pandas as pd
import argparse
import yaml
from src.helpers.helpers import read_raw, save_dataset, setFeatureType, fillColumnNAs
import logging
logger = logging.getLogger(__name__)
def trim_columns(df, columnlist):
"""
Return dataframe that has selected columns with leading or trailing spaces removed
:param df (pandas dataframe): input pandas dataframe
:param columnlist (list): list of column names of the pandas dataframe to trim
:return: a pandas dataframe with selected columns trimmed
"""
for column in columnlist:
if df[column].dtypes == 'O':
df[column] = df[column].str.strip()
return df
def select_columns(df, columnlist):
"""
Return only selected columns for the dataframe
:param df (pandas dataframe): input pandas dataframe
:param columnlist (list): list of column names of the pandas dataframe to include
:return: a pandas dataframe with only selected columns
"""
if set(columnlist).issubset(df.columns):
df_part = df[columnlist]
df_part = df_part.reset_index()
logger.info(df_part.head(2))
return df_part
else:
raise ValueError("Error: columns not entirely found in the dataset")
def gen_rankings_static(df):
"""
Generate static ranking of ATP players based on their ranking of the last game they played.
    This is different from the dynamic ranking published by ATP each week.
:param df (pandas dataframe): input pandas dataframe
:return: a pandas dataframe with player name and their static rank
"""
df_columns_needed = ['Winner', 'Loser', 'Date', 'WRank', 'LRank']
if set(df_columns_needed).issubset(set(df.columns)) == False:
logger.error("%s not in main dataset", str(df_columns_needed))
raise ValueError("Required columns not present")
else:
# Last available rank when players are match winners
rank1 = df.groupby(['Winner']).agg({'Date':'max'})\
.merge(df[['Winner', 'Date', 'WRank']], on = ['Winner', 'Date'], how = 'inner')\
.rename(columns={"Winner": "Player", "WRank": "Rank"})
        # Last available rank when players are match losers
rank2 = df.groupby(['Loser']).agg({'Date':'max'})\
.merge(df[['Loser', 'Date', 'LRank']], on = ['Loser', 'Date'], how = 'inner')\
.rename(columns={"Loser": "Player", "LRank": "Rank"})
allranks = pd.concat([rank1, rank2]).reset_index(drop = True).sort_values(by=['Player','Date']).reset_index(drop = True)
allranks = allranks.reset_index()
allranks["date_rank"] = allranks.groupby("Player")["index"].rank(ascending=0,method='first')
# Preserve the rank of the latest date
ranktable = allranks[allranks["date_rank"] == 1][['Player', 'Rank']]
return ranktable.reset_index(drop = True)
def calculate_h2h(df):
"""
Create 4 head-to-head features for the main data:
'h2h_win': total number of wins in head-to-head records between 2 players
'h2h_lost': total number of losts in head-to-head records between 2 players
'totalPlayed': total number of matches between 2 players
'h2h_win_pct': percentage of winning in the head-to-head record between 2 players
    Raises a ValueError if the required input columns are missing.
:param df (pd.dataFrame): pandas dataframe containing input data
:return: df (pd.dataFrame): pandas dataframe containing processed features
"""
columns_needed = ['Winner', 'Loser']
if set(columns_needed).issubset(set(df.columns)):
h2hdf = df[columns_needed].assign(index = 1)
h2hwin = h2hdf.groupby(['Winner', 'Loser']).aggregate('count').reset_index()[['Winner', 'Loser', 'index']]
h2hwin.rename(columns={'index':'h2h_win'}, inplace=True)
h2hwin['h2h_win'] = h2hwin['h2h_win'].astype(float)
h2hlost = h2hwin.copy()
# Swap "winner" and "loser" to calculate head-to-head lost records
h2hlost.rename(columns={'h2h_win':'h2h_lost', 'Winner':'Loser', 'Loser':'Winner'}, inplace=True)
merged = h2hwin.merge(h2hlost, how='outer', on=['Winner', 'Loser'])
fillColumnNAs(merged, ['h2h_win', 'h2h_lost'])
merged['totalPlayed'] = merged['h2h_win'] + merged['h2h_lost']
merged['h2h_win_pct'] = merged['h2h_win']/merged['totalPlayed']
return merged
else:
logger.error("Error: required columns not entirely found in the dataset")
raise ValueError("Required columns not present")
def calculate_surface_winpct(df):
"""
Summarize each player's winning percentage and total matches played by surface
'surf_matches': Total number of matches played on the specific surface
'surf_winpct': Historical percentage of winning on the specific surface
:param df (pd.dataFrame): pandas dataframe containing input data
:return: df (pd.dataFrame): pandas dataframe containing generated features, grouped by player
and surface
"""
columns_needed = ['Winner', 'Loser', 'Surface']
if set(columns_needed).issubset(set(df.columns)) == False:
logger.error("%s not in dataset", str(columns_needed))
raise ValueError("Required columns not present")
else:
df = df[columns_needed].assign(index = 1)
surf_win = df.groupby(['Winner', 'Surface']).aggregate('count')\
.reset_index()[['Winner', 'Surface', 'index']]\
.rename(columns={'index':'totalWin'})
surf_lost = df.groupby(['Loser', 'Surface']).aggregate('count')\
.reset_index()[['Loser', 'Surface', 'index']]\
.rename(columns={'index':'totalLost'})
surface = surf_win.merge(surf_lost, how='outer', \
left_on=['Winner', 'Surface'], right_on=['Loser', 'Surface'])
surface.loc[(pd.isnull(surface.Winner), 'Winner')] = surface.Loser
fillColumnNAs(surface, ['totalWin','totalLost'])
surface['surf_matches'] = surface['totalWin'] + surface['totalLost']
surface['surf_winpct'] = surface['totalWin']/surface['surf_matches']
surface['Player'] = surface['Winner'].copy()
return surface[['Player', 'Surface', 'surf_matches', 'surf_winpct']]
def run_trimdata(args):
"""Orchestrates the trim data functionalities from commandline arguments."""
with open(args.config, "r") as f:
config = yaml.load(f, Loader=yaml.BaseLoader)
df = read_raw(**config["run_trimdata"]['read_raw'])
df_trim = trim_columns(df, **config["run_trimdata"]['trim_columns'])
save_dataset(df_trim, **config["run_trimdata"]['save_dataset'])
f.close()
return df_trim
def run_rankingstable(args):
"""Orchestrates the generation of rankings table from commandline arguments."""
with open(args.config, "r") as f:
config = yaml.load(f, Loader=yaml.BaseLoader)
df = read_raw(**config["run_rankingstable"]['read_raw'])
srank = gen_rankings_static(df)
save_dataset(srank, **config["run_rankingstable"]['save_dataset'])
f.close()
return srank
def run_h2h_record(args):
"""Orchestrates the generating of h2h records table from commandline arguments."""
with open(args.config, "r") as f:
config = yaml.load(f, Loader=yaml.BaseLoader)
df = read_raw(**config["run_h2h_record"]['read_raw'])
h2h_record = calculate_h2h(df)
save_dataset(h2h_record, **config["run_h2h_record"]['save_dataset'])
f.close()
return h2h_record
def run_surface_record(args):
"""Orchestrates the generating of surface win records table from commandline arguments."""
with open(args.config, "r") as f:
config = yaml.load(f, Loader=yaml.BaseLoader)
df = read_raw(**config["run_surface_record"]['read_raw'])
surface_record = calculate_surface_winpct(df)
save_dataset(surface_record, **config["run_surface_record"]['save_dataset'])
f.close()
return surface_record
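
# Illustrative round trip for calculate_h2h on toy data (players invented):
#   toy = pd.DataFrame({'Winner': ['Federer', 'Nadal', 'Federer'],
#                       'Loser':  ['Nadal', 'Federer', 'Murray']})
#   calculate_h2h(toy)
#   # Federer vs Nadal:  h2h_win=1, h2h_lost=1, totalPlayed=2, h2h_win_pct=0.5
#   # Federer vs Murray: h2h_win=1, h2h_lost=0, totalPlayed=1, h2h_win_pct=1.0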
|
[
"zijinchen2019@u.northwestern.edu"
] |
zijinchen2019@u.northwestern.edu
|
d4a9e5885c4f58e0f7e1c34b9e62a46a6cb72b66
|
2d36eecc0b123fb5c6b0d77036edffe3cba68e8c
|
/billsToPay/asgi.py
|
b79f83e7410d2fc0e9080c7223f088887baf5cd4
|
[] |
no_license
|
zwykuy-gracz/billstopay
|
e7ee60ab32ce9866223aa888d48f21821043e5ad
|
bcb8d6a181cfc29cedc1dbe8af50eee0cdaae78f
|
refs/heads/master
| 2023-07-31T02:48:46.095531
| 2021-09-16T08:56:38
| 2021-09-16T08:56:38
| 405,963,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
ASGI config for billsToPay project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'billsToPay.settings')
application = get_asgi_application()
|
[
"Paul.Specjal@intrum.com"
] |
Paul.Specjal@intrum.com
|
6821205dff8d4bf5af67bd99c4b092e8d390a3c3
|
5c533e2cf1f2fa87e55253cdbfc6cc63fb2d1982
|
/python/pymonad/monad_parse.py
|
a37f2195d1412f89bfddadf9d4bb469858d0db09
|
[] |
no_license
|
philzook58/python
|
940c24088968f0d5c655e2344dfa084deaefe7c6
|
6d43db5165c9bcb17e8348a650710c5f603e6a96
|
refs/heads/master
| 2020-05-25T15:42:55.428149
| 2018-05-14T03:33:29
| 2018-05-14T03:33:29
| 69,040,196
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
# parser is of form string -> [(symbol, restofstring), (other possibility), ...]
# parsebind must return a new parser that runs `parser`, feeds each parsed
# symbol to `parserproducer`, and continues with the rest of the input.
def parsebind(parser, parserproducer):
    def combinedparser(string):
        results = []
        for (symb, restofstring) in parser(string):
            results.extend(parserproducer(symb)(restofstring))
        return results
    return combinedparser
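
# Illustrative use with a one-character parser (hypothetical helper, not part
# of the original file):
#   def char(c):
#       def p(s):
#           return [(c, s[1:])] if s.startswith(c) else []
#       return p
#   ab = parsebind(char('a'), lambda symb: char('b'))
#   ab('abc')   # -> [('b', 'c')]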
|
[
"philip@FartMachine7.local"
] |
philip@FartMachine7.local
|
1f08fdcb74ad6a4d0aa6e04cbcf96f1ef20e4d48
|
05bbf828f376844b26c5cee143cb4a85f07f3827
|
/examples/test_case_management_v1_examples.py
|
bc98561d1d7a6583e9a781ea56243c6a1143e4df
|
[
"Apache-2.0"
] |
permissive
|
KRuelY/platform-services-python-sdk
|
b8da1a499d9ea13f9dfac88686b0490df9a269c5
|
71a24742c28de4bdee9bfcc53e66c8741746d089
|
refs/heads/main
| 2023-06-26T02:46:06.227936
| 2021-06-29T15:32:29
| 2021-06-29T15:32:29
| 390,108,509
| 0
| 0
|
Apache-2.0
| 2021-07-27T19:49:14
| 2021-07-27T19:49:13
| null |
UTF-8
|
Python
| false
| false
| 11,253
|
py
|
# -*- coding: utf-8 -*-
# (C) Copyright IBM Corp. 2021.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Examples for CaseManagementV1
"""
import io
import json
import os
import pytest
from ibm_cloud_sdk_core import ApiException, read_external_sources
from ibm_platform_services.case_management_v1 import *
#
# This file provides an example of how to use the Case Management service.
#
# The following configuration properties are assumed to be defined:
#
# CASE_MANAGEMENT_URL=<service url>
# CASE_MANAGEMENT_AUTH_TYPE=iam
# CASE_MANAGEMENT_AUTH_URL=<IAM token service URL - omit this if using the production environment>
# CASE_MANAGEMENT_APIKEY=<IAM apikey>
# CASE_MANAGEMENT_RESOURCE_CRN=<CRN of resource to use in examples>
#
# These configuration properties can be exported as environment variables, or stored
# in a configuration file and then:
# export IBM_CREDENTIALS_FILE=<name of configuration file>
#
config_file = 'case_management.env'
case_management_service = None
config = None
case_number = None
attachment_id = None
resource_crn = None
##############################################################################
# Start of Examples for Service: CaseManagementV1
##############################################################################
# region
class TestCaseManagementV1Examples():
"""
Example Test Class for CaseManagementV1
"""
@classmethod
def setup_class(cls):
global case_management_service
if os.path.exists(config_file):
os.environ['IBM_CREDENTIALS_FILE'] = config_file
# begin-common
case_management_service = CaseManagementV1.new_instance(
)
# end-common
assert case_management_service is not None
# Load the configuration
global config
config = read_external_sources(
CaseManagementV1.DEFAULT_SERVICE_NAME)
global resource_crn
resource_crn = config['RESOURCE_CRN']
print('Setup complete.')
needscredentials = pytest.mark.skipif(
not os.path.exists(config_file), reason='External configuration not available, skipping...'
)
@needscredentials
def test_create_case_example(self):
"""
create_case request example
"""
try:
print('\ncreate_case() result:')
# begin-createCase
offering_type = OfferingType(
group='crn_service_name',
key='cloud-object-storage'
)
offering_payload = Offering(
name='Cloud Object Storage',
type=offering_type
)
case = case_management_service.create_case(
type='technical',
subject='Example technical case',
description='This is an example case description. This is where the problem would be described.',
offering=offering_payload,
severity=4,
).get_result()
print(json.dumps(case, indent=2))
# end-createCase
assert case is not None
assert case['number'] is not None
global case_number
case_number = case['number']
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_get_case_example(self):
"""
get_case request example
"""
assert case_number is not None
try:
print('\nget_case() result:')
# begin-getCase
fields_to_return = [
GetCaseEnums.Fields.DESCRIPTION,
GetCaseEnums.Fields.STATUS,
GetCaseEnums.Fields.SEVERITY,
GetCaseEnums.Fields.CREATED_BY,
]
case = case_management_service.get_case(
case_number=case_number,
fields=fields_to_return
).get_result()
print(json.dumps(case, indent=2))
# end-getCase
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_get_cases_example(self):
"""
get_cases request example
"""
try:
print('\nget_cases() result:')
# begin-getCases
case_list = case_management_service.get_cases(
offset=0,
limit=100,
search='blocker',
sort=GetCasesEnums.Fields.UPDATED_AT,
).get_result()
print(json.dumps(case_list, indent=2))
# end-getCases
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_add_comment_example(self):
"""
add_comment request example
"""
assert case_number is not None
try:
print('\nadd_comment() result:')
# begin-addComment
comment = case_management_service.add_comment(
case_number=case_number,
comment='This is an example comment.'
).get_result()
print(json.dumps(comment, indent=2))
# end-addComment
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_add_watchlist_example(self):
"""
add_watchlist request example
"""
assert case_number is not None
try:
print('\nadd_watchlist() result:')
# begin-addWatchlist
watchlist_users = [
User(realm='IBMid', user_id='abc@ibm.com')
]
watchlist_add_response = case_management_service.add_watchlist(
case_number=case_number,
watchlist=watchlist_users,
).get_result()
print(json.dumps(watchlist_add_response, indent=2))
# end-addWatchlist
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_remove_watchlist_example(self):
"""
remove_watchlist request example
"""
assert case_number is not None
try:
print('\nremove_watchlist() result:')
# begin-removeWatchlist
watchlist_users = [
User(realm='IBMid', user_id='abc@ibm.com')
]
watchlist = case_management_service.remove_watchlist(
case_number=case_number,
watchlist=watchlist_users,
).get_result()
print(json.dumps(watchlist, indent=2))
# end-removeWatchlist
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_add_resource_example(self):
"""
add_resource request example
"""
assert case_number is not None
assert resource_crn is not None
try:
print('\nadd_resource() result:')
# begin-addResource
resource = case_management_service.add_resource(
case_number=case_number,
crn=resource_crn,
note='This resource is the service that is having the problem.',
).get_result()
print(json.dumps(resource, indent=2))
# end-addResource
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_upload_file_example(self):
"""
upload_file request example
"""
assert case_number is not None
try:
print('\nupload_file() result:')
# begin-uploadFile
example_file_content = b'This is the content of the file to upload.'
file_with_metadata_model = {
'data': io.BytesIO(example_file_content).getvalue(),
'filename': 'example.log',
'content_type': 'application/octet-stream',
}
files_to_upload = [file_with_metadata_model]
attachment = case_management_service.upload_file(
case_number=case_number,
file=files_to_upload,
).get_result()
print(json.dumps(attachment, indent=2))
# end-uploadFile
assert attachment is not None
assert attachment['id'] is not None
global attachment_id
attachment_id = attachment['id']
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_download_file_example(self):
"""
download_file request example
"""
assert case_number is not None
assert attachment_id is not None
try:
print('\ndownload_file() result:')
# begin-downloadFile
response = case_management_service.download_file(
case_number=case_number,
file_id=attachment_id,
)
file = response.get_result()
print(file.content)
# end-downloadFile
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_delete_file_example(self):
"""
delete_file request example
"""
assert case_number is not None
assert attachment_id is not None
try:
print('\ndelete_file() result:')
# begin-deleteFile
attachment_list = case_management_service.delete_file(
case_number=case_number,
file_id=attachment_id
).get_result()
print(json.dumps(attachment_list, indent=2))
# end-deleteFile
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_update_case_status_example(self):
"""
update_case_status request example
"""
assert case_number is not None
try:
print('\nupdate_case_status() result:')
# begin-updateCaseStatus
status_payload_model = {
'action': 'resolve',
'comment': 'The problem has been resolved.',
'resolution_code': 1,
}
case = case_management_service.update_case_status(
case_number=case_number,
status_payload=status_payload_model
).get_result()
print(json.dumps(case, indent=2))
# end-updateCaseStatus
except ApiException as e:
pytest.fail(str(e))
# endregion
##############################################################################
# End of Examples for Service: CaseManagementV1
##############################################################################
|
[
"noreply@github.com"
] |
KRuelY.noreply@github.com
|
ecee1674e72267c285c73c6c53e20b11c7b039b3
|
2419dbbdeceb2776340e24e1190a49f0974cbd6f
|
/Preprocessing.py
|
e7404c6f9bf731629fef68231bcb588f298c986a
|
[] |
no_license
|
AumPC/Walmart_Forecasting
|
ecb508f6837b4ea759e64d0d9992e98986a67fa7
|
f0c9fe057e7e65773a7fd65efb378a3826dd8d74
|
refs/heads/master
| 2020-03-07T18:30:10.925770
| 2018-04-01T15:26:30
| 2018-04-01T15:26:30
| 127,640,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
import pandas as pd
features = pd.read_csv("data/features.csv")
test = pd.read_csv("data/test.csv")
train = pd.read_csv("data/train.csv")
stores = pd.read_csv("data/stores.csv")
train_data = pd.merge(train, stores)
train_data = pd.merge(train_data, features)
test_data = test.merge(stores, how='left', sort=False)
test_data = test_data.merge(features, how='left', sort=False)
train_data.to_csv("data/train_data.csv", index = False)
test_data.to_csv("data/test_data.csv", index = False)
|
[
"blue-aum@live.com"
] |
blue-aum@live.com
|
58585528755e79552c9fb1ef6ccb42c3078b6450
|
13edd4b08d610170b1777d627e616636fe9c2ce4
|
/startup/80-settings.py
|
f78b61f70e1b990a4a262765b9106d53e8598e47
|
[] |
no_license
|
licode/profile_collection
|
1a53bfef31ebb638fca60e5c248c18a203659664
|
b67711626eaede033a11587b454cd7f4a1e7871b
|
refs/heads/master
| 2021-01-19T12:37:21.382510
| 2016-09-30T16:20:23
| 2016-09-30T16:20:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 928
|
py
|
from ophyd import EpicsSignal, EpicsSignalRO
#gs.DETS = []
#all_objs = globals()
#counters = [counter for counter in all_objs.values() if isinstance(counter, EpicsSignal)]
#gs.DETS = counters
"""
import logging
from ophyd.session import get_session_manager
sessionmgr = get_session_manager()
sessionmgr['olog_client'] = olog_client
print('These positioners are disconnected:')
print([k for k, v in sessionmgr.get_positioners().items() if not v.connected])
# metadata set at startup
gs.RE.md['owner'] = 'xf21id1'
gs.RE.md['group'] = 'esm'
gs.RE.md['beamline_id'] = 'ESM'
#gs.RE.md['custom'] = {}
def print_scanid(name, doc):
if name == 'start':
print('Scan ID:', doc['scan_id'])
print('Unique ID:', doc['uid'])
def print_md(name, doc):
if name == 'start':
print('Metadata:\n', repr(doc))
#gs.RE.subscribe('start', print_scanid)
#from ophyd.commands import wh_pos, log_pos, mov, movr
"""
|
[
"xf21id1@xf21id1-ws1.cs.nsls2.local"
] |
xf21id1@xf21id1-ws1.cs.nsls2.local
|
48cfa488723f8876386d29af748c3142f4adf3b5
|
2271ec0ecde1016cc75455ac4d9a32d1db5de518
|
/test/test_resources.py
|
7f6320438eac143c814b6b666c41d54c8d3bc373
|
[] |
no_license
|
faalkao/HotSpotAnalysis_Plugin
|
a344a36826346d24b29659b70300a96e90e5635d
|
185463c44892718c0a2aa7c608e238a12c3e3543
|
refs/heads/master
| 2022-01-15T22:12:37.164138
| 2017-02-24T09:20:19
| 2017-02-24T09:20:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,084
|
py
|
# coding=utf-8
"""Resources test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'stanly.shaji@mail.polimi.it'
__date__ = '2016-06-19'
__copyright__ = 'Copyright 2016, Stanly Shaji, Arunkumar / Politecnico Di Milano'
import unittest
from PyQt4.QtGui import QIcon
class HotspotAnalysisDialogTest(unittest.TestCase):
"""Test rerources work."""
def setUp(self):
"""Runs before each test."""
pass
def tearDown(self):
"""Runs after each test."""
pass
def test_icon_png(self):
"""Test we can click OK."""
path = ':/plugins/HotspotAnalysis/icon.png'
icon = QIcon(path)
self.assertFalse(icon.isNull())
if __name__ == "__main__":
    suite = unittest.makeSuite(HotspotAnalysisDialogTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
|
[
"stanly3690@gmail.com"
] |
stanly3690@gmail.com
|
f5a9af21f3667cdee8a41a208bca46ea6b56f53f
|
9dffa36a0b2ae8598d9020d70ffed9b1cc209e84
|
/inverter_util.py
|
a6722fe6b6fc59a1836150165af4ba0183c60926
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
cebudding/Lab_Rotation_1
|
f5b9fc4bcdeb1aa673b329d97d1102c3a2bd67d3
|
5a731c9bef3a5b86436c1f1ceab2b8eb04b243a4
|
refs/heads/master
| 2020-09-28T21:31:21.413016
| 2019-12-09T13:10:01
| 2019-12-09T13:10:01
| 226,869,557
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,718
|
py
|
import torch
import numpy as np
import torch.nn.functional as F
from utils import pprint, Flatten
def module_tracker(fwd_hook_func):
"""
Wrapper for tracking the layers throughout the forward pass.
Args:
fwd_hook_func: Forward hook function to be wrapped.
Returns:
Wrapped method.
"""
def hook_wrapper(relevance_propagator_instance, layer, *args):
relevance_propagator_instance.module_list.append(layer)
return fwd_hook_func(relevance_propagator_instance, layer, *args)
return hook_wrapper
class RelevancePropagator:
"""
Class for computing the relevance propagation and supplying
the necessary forward hooks for all layers.
"""
# All layers that do not require any specific forward hooks.
# This is due to the fact that they are all one-to-one
# mappings and hence no normalization is needed (each node only
# influences exactly one other node -> relevance conservation
# ensures that relevance is just inherited in a one-to-one manner, too).
allowed_pass_layers = (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
torch.nn.ReLU, torch.nn.ELU, Flatten,
torch.nn.Dropout, torch.nn.Dropout2d,
torch.nn.Dropout3d,
torch.nn.Softmax,
torch.nn.LogSoftmax,
torch.nn.Sigmoid)
# Implemented rules for relevance propagation.
available_methods = ["e-rule", "b-rule"]
def __init__(self, lrp_exponent, beta, method, epsilon, device):
self.device = device
self.layer = None
self.p = lrp_exponent
self.beta = beta
self.eps = epsilon
self.warned_log_softmax = False
self.module_list = []
if method not in self.available_methods:
raise NotImplementedError("Only methods available are: " +
str(self.available_methods))
self.method = method
def reset_module_list(self):
"""
        The module list is reset for every evaluation, in case the order or
        number of layers changes dynamically.
Returns:
None
"""
self.module_list = []
# Try to free memory
if self.device.type == "cuda":
torch.cuda.empty_cache()
def compute_propagated_relevance(self, layer, relevance):
"""
This method computes the backward pass for the incoming relevance
for the specified layer.
Args:
layer: Layer to be reverted.
relevance: Incoming relevance from higher up in the network.
Returns:
            The relevance propagated back to the input of the given layer.
"""
if isinstance(layer,
(torch.nn.MaxPool1d, torch.nn.MaxPool2d, torch.nn.MaxPool3d)):
return self.max_pool_nd_inverse(layer, relevance).detach()
elif isinstance(layer,
(torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d)):
return self.conv_nd_inverse(layer, relevance).detach()
elif isinstance(layer, torch.nn.LogSoftmax):
# Only layer that does not conserve relevance. Mainly used
# to make probability out of the log values. Should probably
# be changed to pure passing and the user should make sure
# the layer outputs are sensible (0 would be 100% class probability,
# but no relevance could be passed on).
if relevance.sum() < 0:
relevance[relevance == 0] = -1e6
relevance = relevance.exp()
if not self.warned_log_softmax:
pprint("WARNING: LogSoftmax layer was "
"turned into probabilities.")
self.warned_log_softmax = True
return relevance
elif isinstance(layer, self.allowed_pass_layers):
# The above layers are one-to-one mappings of input to
# output nodes. All the relevance in the output will come
# entirely from the input node. Given the conservation
# of relevance, the input is as relevant as the output.
return relevance
elif isinstance(layer, torch.nn.Linear):
return self.linear_inverse(layer, relevance).detach()
else:
raise NotImplementedError("The network contains layers that"
" are currently not supported {0:s}".format(str(layer)))
def get_layer_fwd_hook(self, layer):
"""
Each layer might need to save very specific data during the forward
pass in order to allow for relevance propagation in the backward
pass. For example, for max_pooling, we need to store the
indices of the max values. In convolutional layers, we need to calculate
the normalizations, to ensure the overall amount of relevance is conserved.
Args:
layer: Layer instance for which forward hook is needed.
Returns:
Layer-specific forward hook.
"""
if isinstance(layer,
(torch.nn.MaxPool1d, torch.nn.MaxPool2d, torch.nn.MaxPool3d)):
return self.max_pool_nd_fwd_hook
if isinstance(layer,
(torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d)):
return self.conv_nd_fwd_hook
if isinstance(layer, self.allowed_pass_layers):
return self.silent_pass # No hook needed.
if isinstance(layer, torch.nn.Linear):
return self.linear_fwd_hook
else:
raise NotImplementedError("The network contains layers that"
" are currently not supported {0:s}".format(str(layer)))
@staticmethod
def get_conv_method(conv_module):
"""
Get dimension-specific convolution.
The forward pass and inversion are made in a
'dimensionality-agnostic' manner and are the same for
all nd instances of the layer, except for the functional
that needs to be used.
Args:
conv_module: instance of convolutional layer.
Returns:
The correct functional used in the convolutional layer.
"""
conv_func_mapper = {
torch.nn.Conv1d: F.conv1d,
torch.nn.Conv2d: F.conv2d,
torch.nn.Conv3d: F.conv3d
}
return conv_func_mapper[type(conv_module)]
@staticmethod
def get_inv_conv_method(conv_module):
"""
Get dimension-specific convolution inversion layer.
The forward pass and inversion are made in a
'dimensionality-agnostic' manner and are the same for
all nd instances of the layer, except for the functional
that needs to be used.
Args:
conv_module: instance of convolutional layer.
Returns:
The correct functional used for inverting the convolutional layer.
"""
conv_func_mapper = {
torch.nn.Conv1d: F.conv_transpose1d,
torch.nn.Conv2d: F.conv_transpose2d,
torch.nn.Conv3d: F.conv_transpose3d
}
return conv_func_mapper[type(conv_module)]
@module_tracker
def silent_pass(self, m, in_tensor: torch.Tensor,
out_tensor: torch.Tensor):
# Placeholder forward hook for layers that do not need
# to store any specific data. Still useful for module tracking.
pass
@staticmethod
def get_inv_max_pool_method(max_pool_instance):
"""
Get dimension-specific max_pooling layer.
The forward pass and inversion are made in a
'dimensionality-agnostic' manner and are the same for
all nd instances of the layer, except for the functional
that needs to be used.
Args:
max_pool_instance: instance of max_pool layer.
Returns:
The correct functional used in the max_pooling layer.
"""
conv_func_mapper = {
torch.nn.MaxPool1d: F.max_unpool1d,
torch.nn.MaxPool2d: F.max_unpool2d,
torch.nn.MaxPool3d: F.max_unpool3d
}
return conv_func_mapper[type(max_pool_instance)]
def linear_inverse(self, m, relevance_in):
if self.method == "e-rule":
m.in_tensor = m.in_tensor.pow(self.p)
w = m.weight.pow(self.p)
norm = F.linear(m.in_tensor, w, bias=None)
norm = norm + torch.sign(norm) * self.eps
relevance_in[norm == 0] = 0
norm[norm == 0] = 1
relevance_out = F.linear(relevance_in / norm,
w.t(), bias=None)
relevance_out *= m.in_tensor
del m.in_tensor, norm, w, relevance_in
return relevance_out
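        # For reference (a gloss on the branch above, not from the source):
        # with z_j = sum_i x_i^p * w_ij^p, the code computes
        #   R_i = sum_j x_i^p * w_ij^p / (z_j + eps * sign(z_j)) * R_j,
        # which for p = 1 is the epsilon-stabilized LRP rule.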
if self.method == "b-rule":
out_c, in_c = m.weight.size()
w = m.weight.repeat((4, 1))
# First and third channel repetition only contain the positive weights
w[:out_c][w[:out_c] < 0] = 0
w[2 * out_c:3 * out_c][w[2 * out_c:3 * out_c] < 0] = 0
# Second and fourth channel repetition with only the negative weights
w[1 * out_c:2 * out_c][w[1 * out_c:2 * out_c] > 0] = 0
w[-out_c:][w[-out_c:] > 0] = 0
# Repeat across channel dimension (pytorch always has channels first)
m.in_tensor = m.in_tensor.repeat((1, 4))
m.in_tensor[:, :in_c][m.in_tensor[:, :in_c] < 0] = 0
m.in_tensor[:, -in_c:][m.in_tensor[:, -in_c:] < 0] = 0
m.in_tensor[:, 1 * in_c:3 * in_c][m.in_tensor[:, 1 * in_c:3 * in_c] > 0] = 0
# Normalize such that the sum of the individual importance values
# of the input neurons divided by the norm
# yields 1 for an output neuron j if divided by norm (v_ij in paper).
# Norm layer just sums the importance values of the inputs
# contributing to output j for each j. This will then serve as the normalization
# such that the contributions of the neurons sum to 1 in order to
# properly split up the relevance of j amongst its roots.
            norm_shape = list(m.out_shape)  # copy so the stored shape is not mutated
norm_shape[1] *= 4
norm = torch.zeros(norm_shape).to(self.device)
for i in range(4):
norm[:, out_c * i:(i + 1) * out_c] = F.linear(
m.in_tensor[:, in_c * i:(i + 1) * in_c], w[out_c * i:(i + 1) * out_c], bias=None)
# Double number of output channels for positive and negative norm per
# channel.
norm_shape[1] = norm_shape[1] // 2
new_norm = torch.zeros(norm_shape).to(self.device)
new_norm[:, :out_c] = norm[:, :out_c] + norm[:, out_c:2 * out_c]
new_norm[:, out_c:] = norm[:, 2 * out_c:3 * out_c] + norm[:, 3 * out_c:]
norm = new_norm
# Some 'rare' neurons only receive either
# only positive or only negative inputs.
# Conservation of relevance does not hold, if we also
# rescale those neurons by (1+beta) or -beta.
# Therefore, catch those first and scale norm by
# the according value, such that it cancels in the fraction.
# First, however, avoid NaNs.
mask = norm == 0
# Set the norm to anything non-zero, e.g. 1.
# The actual inputs are zero at this point anyways, that
# is why norm is zero in the first place.
norm[mask] = 1
# The norm in the b-rule has shape (N, 2*out_c, *spatial_dims).
# The first out_c block corresponds to the positive norms,
# the second out_c block corresponds to the negative norms.
# We find the rare neurons by choosing those nodes per channel
# in which either the positive norm ([:, :out_c]) is zero, or
            # the negative norm ([:, out_c:]) is zero.
rare_neurons = (mask[:, :out_c] + mask[:, out_c:])
# Also, catch new possibilities for norm == zero to avoid NaN..
# The actual value of norm again does not really matter, since
# the pre-factor will be zero in this case.
norm[:, :out_c][rare_neurons] *= 1 if self.beta == -1 else 1 + self.beta
norm[:, out_c:][rare_neurons] *= 1 if self.beta == 0 else -self.beta
# Add stabilizer term to norm to avoid numerical instabilities.
norm += self.eps * torch.sign(norm)
input_relevance = relevance_in.squeeze(dim=-1).repeat(1, 4)
input_relevance[:, :2*out_c] *= (1+self.beta)/norm[:, :out_c].repeat(1, 2)
input_relevance[:, 2*out_c:] *= -self.beta/norm[:, out_c:].repeat(1, 2)
inv_w = w.t()
relevance_out = torch.zeros_like(m.in_tensor)
for i in range(4):
relevance_out[:, i*in_c:(i+1)*in_c] = F.linear(
input_relevance[:, i*out_c:(i+1)*out_c],
weight=inv_w[:, i*out_c:(i+1)*out_c], bias=None)
relevance_out *= m.in_tensor
sum_weights = torch.zeros([in_c, in_c * 4, 1]).to(self.device)
for i in range(in_c):
sum_weights[i, i::in_c] = 1
relevance_out = F.conv1d(relevance_out[:, :, None], weight=sum_weights, bias=None)
del sum_weights, input_relevance, norm, rare_neurons, \
mask, new_norm, m.in_tensor, w, inv_w
return relevance_out
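        # For reference (a gloss on the branch above, not from the source):
        # this realizes the alpha-beta style rule with alpha = 1 + beta,
        #   R_i = sum_j ((1+beta) * z_ij^+ / z_j^+ - beta * z_ij^- / z_j^-) * R_j,
        # where z_ij = x_i * w_ij and z_j^+/- sum the positive/negative parts.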
@module_tracker
def linear_fwd_hook(self, m, in_tensor: torch.Tensor,
out_tensor: torch.Tensor):
setattr(m, "in_tensor", in_tensor[0])
setattr(m, "out_shape", list(out_tensor.size()))
return
def max_pool_nd_inverse(self, layer_instance, relevance_in):
# In case the output had been reshaped for a linear layer,
# make sure the relevance is put into the same shape as before.
relevance_in = relevance_in.view(layer_instance.out_shape)
invert_pool = self.get_inv_max_pool_method(layer_instance)
inverted = invert_pool(relevance_in, layer_instance.indices,
layer_instance.kernel_size, layer_instance.stride,
layer_instance.padding, output_size=layer_instance.in_shape)
del layer_instance.indices
return inverted
@module_tracker
def max_pool_nd_fwd_hook(self, m, in_tensor: torch.Tensor,
out_tensor: torch.Tensor):
# Ignore unused for pylint
_ = self
        # Save the return_indices setting so it can be restored after the call.
tmp_return_indices = bool(m.return_indices)
m.return_indices = True
_, indices = m.forward(in_tensor[0])
m.return_indices = tmp_return_indices
setattr(m, "indices", indices)
setattr(m, 'out_shape', out_tensor.size())
setattr(m, 'in_shape', in_tensor[0].size())
def conv_nd_inverse(self, m, relevance_in):
# In case the output had been reshaped for a linear layer,
# make sure the relevance is put into the same shape as before.
relevance_in = relevance_in.view(m.out_shape)
# Get required values from layer
inv_conv_nd = self.get_inv_conv_method(m)
conv_nd = self.get_conv_method(m)
if self.method == "e-rule":
with torch.no_grad():
m.in_tensor = m.in_tensor.pow(self.p).detach()
w = m.weight.pow(self.p).detach()
norm = conv_nd(m.in_tensor, weight=w, bias=None,
stride=m.stride, padding=m.padding,
groups=m.groups)
norm = norm + torch.sign(norm) * self.eps
relevance_in[norm == 0] = 0
norm[norm == 0] = 1
relevance_out = inv_conv_nd(relevance_in/norm,
weight=w, bias=None,
padding=m.padding, stride=m.stride,
groups=m.groups)
relevance_out *= m.in_tensor
del m.in_tensor, norm, w
return relevance_out
if self.method == "b-rule":
with torch.no_grad():
w = m.weight
out_c, in_c = m.out_channels, m.in_channels
repeats = np.array(np.ones_like(w.size()).flatten(), dtype=int)
repeats[0] *= 4
w = w.repeat(tuple(repeats))
# First and third channel repetition only contain the positive weights
w[:out_c][w[:out_c] < 0] = 0
w[2 * out_c:3 * out_c][w[2 * out_c:3 * out_c] < 0] = 0
# Second and fourth channel repetition with only the negative weights
w[1 * out_c:2 * out_c][w[1 * out_c:2 * out_c] > 0] = 0
w[-out_c:][w[-out_c:] > 0] = 0
repeats = np.array(np.ones_like(m.in_tensor.size()).flatten(), dtype=int)
repeats[1] *= 4
# Repeat across channel dimension (pytorch always has channels first)
m.in_tensor = m.in_tensor.repeat(tuple(repeats))
m.in_tensor[:, :in_c][m.in_tensor[:, :in_c] < 0] = 0
m.in_tensor[:, -in_c:][m.in_tensor[:, -in_c:] < 0] = 0
m.in_tensor[:, 1 * in_c:3 * in_c][m.in_tensor[:, 1 * in_c:3 * in_c] > 0] = 0
groups = 4
# Normalize such that the sum of the individual importance values
# of the input neurons divided by the norm
# yields 1 for an output neuron j if divided by norm (v_ij in paper).
# Norm layer just sums the importance values of the inputs
# contributing to output j for each j. This will then serve as the normalization
# such that the contributions of the neurons sum to 1 in order to
# properly split up the relevance of j amongst its roots.
norm = conv_nd(m.in_tensor, weight=w, bias=None, stride=m.stride,
padding=m.padding, dilation=m.dilation, groups=groups * m.groups)
# Double number of output channels for positive and negative norm per
# channel. Using list with out_tensor.size() allows for ND generalization
            new_shape = list(m.out_shape)  # copy so the stored shape is not mutated
new_shape[1] *= 2
new_norm = torch.zeros(new_shape).to(self.device)
new_norm[:, :out_c] = norm[:, :out_c] + norm[:, out_c:2 * out_c]
new_norm[:, out_c:] = norm[:, 2 * out_c:3 * out_c] + norm[:, 3 * out_c:]
norm = new_norm
# Some 'rare' neurons only receive either
# only positive or only negative inputs.
# Conservation of relevance does not hold, if we also
# rescale those neurons by (1+beta) or -beta.
# Therefore, catch those first and scale norm by
# the according value, such that it cancels in the fraction.
# First, however, avoid NaNs.
mask = norm == 0
# Set the norm to anything non-zero, e.g. 1.
# The actual inputs are zero at this point anyways, that
# is why norm is zero in the first place.
norm[mask] = 1
# The norm in the b-rule has shape (N, 2*out_c, *spatial_dims).
# The first out_c block corresponds to the positive norms,
# the second out_c block corresponds to the negative norms.
# We find the rare neurons by choosing those nodes per channel
# in which either the positive norm ([:, :out_c]) is zero, or
            # the negative norm ([:, out_c:]) is zero.
rare_neurons = (mask[:, :out_c] + mask[:, out_c:])
# Also, catch new possibilities for norm == zero to avoid NaN..
# The actual value of norm again does not really matter, since
# the pre-factor will be zero in this case.
norm[:, :out_c][rare_neurons] *= 1 if self.beta == -1 else 1 + self.beta
norm[:, out_c:][rare_neurons] *= 1 if self.beta == 0 else -self.beta
# Add stabilizer term to norm to avoid numerical instabilities.
norm += self.eps * torch.sign(norm)
spatial_dims = [1] * len(relevance_in.size()[2:])
input_relevance = relevance_in.repeat(1, 4, *spatial_dims)
input_relevance[:, :2*out_c] *= (1+self.beta)/norm[:, :out_c].repeat(1, 2, *spatial_dims)
input_relevance[:, 2*out_c:] *= -self.beta/norm[:, out_c:].repeat(1, 2, *spatial_dims)
# Each of the positive / negative entries needs its own
# convolution. TODO: Can this be done in groups, too?
relevance_out = torch.zeros_like(m.in_tensor)
            # Pad the result back up to the input size, which the
            # transposed convolution can fall short of when stride > 1.
            tmp_result = result = None
for i in range(4):
tmp_result = inv_conv_nd(
input_relevance[:, i*out_c:(i+1)*out_c],
weight=w[i*out_c:(i+1)*out_c],
bias=None, padding=m.padding, stride=m.stride,
groups=m.groups)
result = torch.zeros_like(relevance_out[:, i*in_c:(i+1)*in_c])
tmp_size = tmp_result.size()
slice_list = [slice(0, l) for l in tmp_size]
                result[tuple(slice_list)] += tmp_result
relevance_out[:, i*in_c:(i+1)*in_c] = result
relevance_out *= m.in_tensor
sum_weights = torch.zeros([in_c, in_c * 4, *spatial_dims]).to(self.device)
for i in range(m.in_channels):
sum_weights[i, i::in_c] = 1
relevance_out = conv_nd(relevance_out, weight=sum_weights, bias=None)
del sum_weights, m.in_tensor, result, mask, rare_neurons, norm, \
new_norm, input_relevance, tmp_result, w
return relevance_out
@module_tracker
def conv_nd_fwd_hook(self, m, in_tensor: torch.Tensor,
out_tensor: torch.Tensor):
setattr(m, "in_tensor", in_tensor[0])
setattr(m, 'out_shape', list(out_tensor.size()))
return
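# Illustrative only: a minimal, self-contained sketch of the epsilon rule
# on a single linear layer (not part of the class above; assumes the
# module-level torch / F imports). It checks that the propagated
# relevance approximately conserves the incoming total.
def _epsilon_rule_linear_sketch(eps=1e-6):
    torch.manual_seed(0)
    x = torch.rand(1, 4)                    # layer input (positive)
    weight = torch.rand(3, 4)               # Linear(4 -> 3) weight (positive)
    z = F.linear(x, weight)                 # pre-activations z_j
    relevance_out = torch.rand(1, 3)        # relevance arriving from above
    norm = z + torch.sign(z) * eps          # stabilized denominator
    relevance_in = F.linear(relevance_out / norm, weight.t()) * x
    # Conservation: the total relevance is preserved up to ~eps.
    assert torch.allclose(relevance_in.sum(), relevance_out.sum(), atol=1e-4)
    return relevance_in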
|
[
"celineevianne@gmail.com"
] |
celineevianne@gmail.com
|
6bdc4df21afbf8fd9b980b1ac18578cfffd6e4e4
|
796c921e8e75e94737854f90835216c87d2dfc8e
|
/PracticeBackEnd.py
|
ffd679678b60e46dd657b6062db599a9ce8c9ec2
|
[] |
no_license
|
bsvonkin/Year9DesignCS4-PythonBS
|
e94f3df3cf59e1b4cae19a2fb9e39d02bed4e5c6
|
fe465d54829eda2f741b90ce5150baf7a04f43ec
|
refs/heads/master
| 2020-03-28T11:03:47.283334
| 2018-12-18T14:05:53
| 2018-12-18T14:05:53
| 148,173,357
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
import tkinter as tk
root = tk.Tk()
root.title("Algebra Calculator")
def dab():
print("dab")
labUN = tk.Label(root, text = "Equation")
labUN.grid(row=1, column=0)
entUN = tk.Entry(root, bg="#F0F0F0")
entUN.grid(row=2, column=0)
btnSubmit = tk.Button(root, text = "Submit", command = dab)
btnSubmit.grid(row=3, column=0)
def change(*args):
print("running change")
print(var.get())
OPTIONS = [
"All Steps",
"No Steps",
"Step by Step",
]
var = tk.StringVar()
var.set(OPTIONS[0])
var.trace("w",change)
dropDownMenu = tk.OptionMenu(root,var, OPTIONS[0],OPTIONS[1],OPTIONS[2])
dropDownMenu.grid(row=5,column=1)
labPassword = tk.Label(root, text = "Answer")
labPassword.grid(row=5, column=0)
entAnswer = tk.Entry(root, bg="#F0F0F0")
entAnswer.grid(row=6,column=0,)
entAnswer.config(state = "disabled")
labSteps = tk.Label(root, text = "Steps")
labSteps.grid(row=7, column=0)
entSteps = tk.Entry(root, bg="#F0F0F0")
entSteps.grid(row=8, column=0)
entSteps.config(state = "disabled")
root.mainloop()
|
[
"ben.svonkin@Y22-Design-Ben-Svonkin.local"
] |
ben.svonkin@Y22-Design-Ben-Svonkin.local
|
767d996c6f0d05e96bcc2fcacdde479d65e75b71
|
7601a6be6e581053820bc3fffd6864e19b1ef680
|
/helloWorld/gurobiTest.py
|
8e32ee5c39fb91d88fca15e07f9088a76dce0712
|
[] |
no_license
|
LucyBean/Part-III
|
0a89a96f355be6b4af44753d74c9569293e772fc
|
cb753a01c6720269fb636725ce2a97c8d143c183
|
refs/heads/master
| 2020-08-01T22:38:58.500258
| 2017-02-23T14:32:36
| 2017-02-23T14:32:36
| 73,567,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,424
|
py
|
'''
Created on Nov 11, 2016
@author: Lucy
'''
from gurobipy import Model, GRB, LinExpr
metabolites = ["Ru5P","FP2","F6P","GAP","R5P"]
revReactions = ["pgi","fba","rpi","rpe"]
irrevReactions = ["gap","zwf","pfk","fbp","prs"]
reactions = revReactions + irrevReactions
reactionsToInclude = ["prs","gap"]
reactionsToExclude = ["zwf"]
#Incidence matrix as a dict
incidence = {
("pgi","F6P"):1,
("fba","FP2"):-1, ("fba","GAP"):2,
("rpi","Ru5P"):-1, ("rpi","R5P"):1,
("rpe","Ru5P"):-2, ("rpe","F6P"):2, ("rpe","GAP"):1, ("rpe","R5P"):-1,
("gap","GAP"):-1,
("zwf","Ru5P"):1,
("pfk","FP2"):1, ("pfk","F6P"):-1,
("fbp","FP2"):-1, ("fbp","F6P"):1,
("prs","R5P"):-1
}
#Make models
model = Model("incidence")
coeffs = model.addVars(reactions)
### Constraints
# Allow reversible reactions to have negative co-efficients
for r in revReactions:
coeffs[r].lb = -GRB.INFINITY
# This does not work, see updated "models.py" in src
# Cx = 0
cs = {}
for m in metabolites:
cs[m] = LinExpr()
for (e,m) in incidence:
ratio = incidence[e,m]
cs[m] += ratio * coeffs[e]
for c in cs:
model.addConstr(cs[c] == 0)
# Include and exclude reactions
for r in reactionsToInclude:
model.addConstr(coeffs[r] >= 1)
for r in reactionsToExclude:
model.addConstr(coeffs[r] == 0)
### Optimize
model.optimize()
for c in coeffs:
if coeffs[c].x > 0:
        print(c, coeffs[c].x)
|
[
"howobscene@msn.com"
] |
howobscene@msn.com
|
e693645cbd1ed303e80c309d0e7857d9a2248013
|
e58dcad84f3151cff22d0cd881bb41cc43eee9fd
|
/Server/predict.py
|
05f23f4fe804908f12d19af45c6b50a6006753d7
|
[] |
no_license
|
samdroid-apps/smart-light-system
|
68bb4ddcc3669700ac6374e43b09f2de2e1f44a4
|
595443a90992b8339949a9877279781efaab3095
|
refs/heads/master
| 2021-01-20T10:30:33.635255
| 2017-08-28T12:30:00
| 2017-08-28T12:30:00
| 101,639,751
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
import os
import json
from sklearn.externals import joblib
import typing as T
from learn import NOT_FOUND_RSSI
if os.path.isfile('model.pkl') and os.path.isfile('model.bssids.json'):
model = joblib.load('model.pkl')
with open('model.bssids.json') as f:
bssid_to_index = json.load(f)
else:
model = None
def predict(data_list: T.List[dict]) -> float:
# 1 = inside
# 0 = outside
if model is None:
return 0.5
x = [NOT_FOUND_RSSI] * len(bssid_to_index)
for item in data_list:
bssid = item['bssid']
level = item['level']
if bssid in bssid_to_index:
x[bssid_to_index[bssid]] = level
X = [x]
Y = model.predict(X)
return Y[0]
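# Illustrative only -- a hypothetical scan result; BSSIDs and levels are
# made up. With no trained model on disk this prints 0.5.
if __name__ == '__main__':
    sample_scan = [
        {'bssid': 'aa:bb:cc:dd:ee:ff', 'level': -45},
        {'bssid': '11:22:33:44:55:66', 'level': -70},
    ]
    print(predict(sample_scan))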
|
[
"sam@sam.today"
] |
sam@sam.today
|
0169b1897bfbdf5de8a401d4e8fa088c6203bac7
|
81f2089aba838cdfb5d17b0a96c7917a57c8ae5c
|
/app.py
|
861290a90845f9728f16d4a2192dac3ad87b4a27
|
[] |
no_license
|
cotraak/AI.content.aggregator
|
731d3c89de8702d1e22662a23bebc5fc42c9b836
|
8608a15c97cf848970179ce2d65fceaf84422061
|
refs/heads/main
| 2023-01-30T14:32:47.675950
| 2020-12-11T05:22:37
| 2020-12-11T05:22:37
| 318,350,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 620
|
py
|
import re
from flask import render_template, Flask
from rss import news, arxiv
app=Flask(__name__)
papers=arxiv().get_recent()
latest_news, news=news().get_recent()
@app.route('/')
def homepage():
return render_template('index.html', papers=papers[:20], temp_res=latest_news)
@app.route('/papers')
def paperspage():
return render_template('papers.html', papers=papers)
@app.route('/news')
def newspage():
return render_template('news.html', news=news)
@app.route('/about')
def aboutpage():
return render_template('about.html')
if __name__ == "__main__":
app.run('0.0.0.0', debug=True, port=80)
|
[
"cotraak@Adityas-MacBook-Pro.local"
] |
cotraak@Adityas-MacBook-Pro.local
|
54e9c0cfbdd264fdd4f79302682d100a5b33fd11
|
f5d1e8b54ddbc51a9ef1b868eee93096d9b0fbeb
|
/weapp/openapi/auth.py
|
cfbcd1a14628bc5a1673ff8b370dc4a3d2ef2dcc
|
[] |
no_license
|
chengdg/weizoom
|
97740c121724fae582b10cdbe0ce227a1f065ece
|
8b2f7befe92841bcc35e0e60cac5958ef3f3af54
|
refs/heads/master
| 2021-01-22T20:29:30.297059
| 2017-03-30T08:39:25
| 2017-03-30T08:39:25
| 85,268,003
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 587
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'Administrator'
from core import resource
import wapi as api_resource
from core.jsonresponse import create_response
class Auth(resource.Resource):
"""
    Auth token resource
"""
app = "openapi"
resource = "auth"
def post(request):
response = create_response(200)
response.data = []
username = request.POST.get('username','')
password = request.POST.get('password','')
auth = api_resource.post('open', 'auth_token', {'username':username,'password':password})
response.data = auth
return response.get_response()
|
[
"duhao@weizoom.com"
] |
duhao@weizoom.com
|
a78402b70c33a73889a5822a90a241cbbcf39467
|
da1500e0d3040497614d5327d2461a22e934b4d8
|
/third_party/jinja2/ext.py
|
c2df12d5592c4275135beffdb2194b22877c4654
|
[
"BSD-3-Clause",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"Apache-2.0",
"MIT"
] |
permissive
|
youtube/cobalt
|
34085fc93972ebe05b988b15410e99845efd1968
|
acefdaaadd3ef46f10f63d1acae2259e4024d383
|
refs/heads/main
| 2023-09-01T13:09:47.225174
| 2023-09-01T08:54:54
| 2023-09-01T08:54:54
| 50,049,789
| 169
| 80
|
BSD-3-Clause
| 2023-09-14T21:50:50
| 2016-01-20T18:11:34
| null |
UTF-8
|
Python
| false
| false
| 25,078
|
py
|
# -*- coding: utf-8 -*-
"""
jinja2.ext
~~~~~~~~~~
    Jinja extensions allow adding custom tags, similar to the way Django
    custom tags work. By default two example extensions exist: an i18n and a cache
extension.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2 import nodes
from jinja2.defaults import BLOCK_START_STRING, \
BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
from jinja2.environment import Environment
from jinja2.runtime import concat
from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
from jinja2.utils import contextfunction, import_string, Markup
from jinja2._compat import next, with_metaclass, string_types, iteritems
# the only real useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext as Jinja doesn't support
# non unicode strings.
GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
class ExtensionRegistry(type):
"""Gives the extension an unique identifier."""
def __new__(cls, name, bases, d):
rv = type.__new__(cls, name, bases, d)
rv.identifier = rv.__module__ + '.' + rv.__name__
return rv
class Extension(with_metaclass(ExtensionRegistry, object)):
"""Extensions can be used to add extra functionality to the Jinja template
system at the parser level. Custom extensions are bound to an environment
but may not store environment specific data on `self`. The reason for
this is that an extension can be bound to another environment (for
overlays) by creating a copy and reassigning the `environment` attribute.
As extensions are created by the environment they cannot accept any
arguments for configuration. One may want to work around that by using
a factory function, but that is not possible as extensions are identified
by their import name. The correct way to configure the extension is
storing the configuration values on the environment. Because this way the
environment ends up acting as central configuration storage the
attributes may clash which is why extensions have to ensure that the names
they choose for configuration are not too generic. ``prefix`` for example
is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
    name as it includes the name of the extension (fragment cache).
"""
#: if this extension parses this is the list of tags it's listening to.
tags = set()
#: the priority of that extension. This is especially useful for
#: extensions that preprocess values. A lower value means higher
#: priority.
#:
#: .. versionadded:: 2.4
priority = 100
def __init__(self, environment):
self.environment = environment
def bind(self, environment):
"""Create a copy of this extension bound to another environment."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.environment = environment
return rv
def preprocess(self, source, name, filename=None):
"""This method is called before the actual lexing and can be used to
preprocess the source. The `filename` is optional. The return value
must be the preprocessed source.
"""
return source
def filter_stream(self, stream):
"""It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\s, but it doesn't have to return a
:class:`~jinja2.lexer.TokenStream`.
In the `ext` folder of the Jinja2 source distribution there is a file
called `inlinegettext.py` which implements a filter that utilizes this
method.
"""
return stream
def parse(self, parser):
"""If any of the :attr:`tags` matched this method is called with the
parser as first argument. The token the parser stream is pointing at
is the name token that matched. This method has to return one or a
list of multiple nodes.
"""
raise NotImplementedError()
def attr(self, name, lineno=None):
"""Return an attribute node for the current extension. This is useful
to pass constants on extensions to generated template code.
::
self.attr('_my_attribute', lineno=lineno)
"""
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
def call_method(self, name, args=None, kwargs=None, dyn_args=None,
dyn_kwargs=None, lineno=None):
"""Call a method of the extension. This is a shortcut for
:meth:`attr` + :class:`jinja2.nodes.Call`.
"""
if args is None:
args = []
if kwargs is None:
kwargs = []
return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
dyn_args, dyn_kwargs, lineno=lineno)
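# For illustration only (not part of Jinja2): a tiny custom extension
# built on the hooks above. The tag name and class are hypothetical.
class _UppercaseExample(Extension):
    """Implements ``{% uppercase %}...{% enduppercase %}``."""
    tags = set(['uppercase'])

    def parse(self, parser):
        lineno = next(parser.stream).lineno
        body = parser.parse_statements(('name:enduppercase',),
                                       drop_needle=True)
        return nodes.CallBlock(self.call_method('_render'),
                               [], [], body).set_lineno(lineno)

    def _render(self, caller):
        return caller().upper()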
@contextfunction
def _gettext_alias(__context, *args, **kwargs):
return __context.call(__context.resolve('gettext'), *args, **kwargs)
def _make_new_gettext(func):
@contextfunction
def gettext(__context, __string, **variables):
rv = __context.call(func, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
return rv % variables
return gettext
def _make_new_ngettext(func):
@contextfunction
def ngettext(__context, __singular, __plural, __num, **variables):
variables.setdefault('num', __num)
rv = __context.call(func, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
return rv % variables
return ngettext
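# With newstyle gettext enabled, templates pass placeholder values
# directly to the callable, e.g. {{ gettext('Hello %(name)s!', name=user) }};
# the wrappers above handle autoescaping and the '%' expansion.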
class InternationalizationExtension(Extension):
"""This extension adds gettext support to Jinja2."""
tags = set(['trans'])
# TODO: the i18n extension is currently reevaluating values in a few
# situations. Take this example:
# {% trans count=something() %}{{ count }} foo{% pluralize
    # %}{{ count }} foos{% endtrans %}
# something is called twice here. One time for the gettext value and
# the other time for the n-parameter of the ngettext function.
def __init__(self, environment):
Extension.__init__(self, environment)
environment.globals['_'] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
install_null_translations=self._install_null,
install_gettext_callables=self._install_callables,
uninstall_gettext_translations=self._uninstall,
extract_translations=self._extract,
newstyle_gettext=False
)
def _install(self, translations, newstyle=None):
gettext = getattr(translations, 'ugettext', None)
if gettext is None:
gettext = translations.gettext
ngettext = getattr(translations, 'ungettext', None)
if ngettext is None:
ngettext = translations.ngettext
self._install_callables(gettext, ngettext, newstyle)
def _install_null(self, newstyle=None):
self._install_callables(
lambda x: x,
lambda s, p, n: (n != 1 and (p,) or (s,))[0],
newstyle
)
def _install_callables(self, gettext, ngettext, newstyle=None):
if newstyle is not None:
self.environment.newstyle_gettext = newstyle
if self.environment.newstyle_gettext:
gettext = _make_new_gettext(gettext)
ngettext = _make_new_ngettext(ngettext)
self.environment.globals.update(
gettext=gettext,
ngettext=ngettext
)
def _uninstall(self, translations):
for key in 'gettext', 'ngettext':
self.environment.globals.pop(key, None)
def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
if isinstance(source, string_types):
source = self.environment.parse(source)
return extract_from_ast(source, gettext_functions)
def parse(self, parser):
"""Parse a translatable tag."""
lineno = next(parser.stream).lineno
num_called_num = False
# find all the variables referenced. Additionally a variable can be
# defined in the body of the trans block too, but this is checked at
# a later state.
plural_expr = None
plural_expr_assignment = None
variables = {}
while parser.stream.current.type != 'block_end':
if variables:
parser.stream.expect('comma')
# skip colon for python compatibility
if parser.stream.skip_if('colon'):
break
name = parser.stream.expect('name')
if name.value in variables:
parser.fail('translatable variable %r defined twice.' %
name.value, name.lineno,
exc=TemplateAssertionError)
# expressions
if parser.stream.current.type == 'assign':
next(parser.stream)
variables[name.value] = var = parser.parse_expression()
else:
variables[name.value] = var = nodes.Name(name.value, 'load')
if plural_expr is None:
if isinstance(var, nodes.Call):
plural_expr = nodes.Name('_trans', 'load')
variables[name.value] = plural_expr
plural_expr_assignment = nodes.Assign(
nodes.Name('_trans', 'store'), var)
else:
plural_expr = var
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural = plural_names = None
have_plural = False
referenced = set()
# now parse until endtrans or pluralize
singular_names, singular = self._parse_block(parser, True)
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
plural_expr = nodes.Name(singular_names[0], 'load')
num_called_num = singular_names[0] == 'num'
# if we have a pluralize block, we parse that too
if parser.stream.current.test('name:pluralize'):
have_plural = True
next(parser.stream)
if parser.stream.current.type != 'block_end':
name = parser.stream.expect('name')
if name.value not in variables:
parser.fail('unknown variable %r for pluralization' %
name.value, name.lineno,
exc=TemplateAssertionError)
plural_expr = variables[name.value]
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
else:
next(parser.stream)
# register free names as simple name expressions
for var in referenced:
if var not in variables:
variables[var] = nodes.Name(var, 'load')
if not have_plural:
plural_expr = None
elif plural_expr is None:
parser.fail('pluralize without variables', lineno)
node = self._make_node(singular, plural, variables, plural_expr,
bool(referenced),
num_called_num and have_plural)
node.set_lineno(lineno)
if plural_expr_assignment is not None:
return [plural_expr_assignment, node]
else:
return node
def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while 1:
if parser.stream.current.type == 'data':
buf.append(parser.stream.current.value.replace('%', '%%'))
next(parser.stream)
elif parser.stream.current.type == 'variable_begin':
next(parser.stream)
name = parser.stream.expect('name').value
referenced.append(name)
buf.append('%%(%s)s' % name)
parser.stream.expect('variable_end')
elif parser.stream.current.type == 'block_begin':
next(parser.stream)
if parser.stream.current.test('name:endtrans'):
break
elif parser.stream.current.test('name:pluralize'):
if allow_pluralize:
break
parser.fail('a translatable section can have only one '
'pluralize section')
parser.fail('control structures in translatable sections are '
'not allowed')
elif parser.stream.eos:
parser.fail('unclosed translation block')
else:
assert False, 'internal parser error'
return referenced, concat(buf)
def _make_node(self, singular, plural, variables, plural_expr,
vars_referenced, num_called_num):
"""Generates a useful node from the data provided."""
# no variables referenced? no need to escape for old style
# gettext invocations only if there are vars.
if not vars_referenced and not self.environment.newstyle_gettext:
singular = singular.replace('%%', '%')
if plural:
plural = plural.replace('%%', '%')
# singular only:
if plural_expr is None:
gettext = nodes.Name('gettext', 'load')
node = nodes.Call(gettext, [nodes.Const(singular)],
[], None, None)
# singular and plural
else:
ngettext = nodes.Name('ngettext', 'load')
node = nodes.Call(ngettext, [
nodes.Const(singular),
nodes.Const(plural),
plural_expr
], [], None, None)
# in case newstyle gettext is used, the method is powerful
# enough to handle the variable expansion and autoescape
# handling itself
if self.environment.newstyle_gettext:
for key, value in iteritems(variables):
# the function adds that later anyways in case num was
# called num, so just skip it.
if num_called_num and key == 'num':
continue
node.kwargs.append(nodes.Keyword(key, value))
# otherwise do that here
else:
# mark the return value as safe if we are in an
# environment with autoescaping turned on
node = nodes.MarkSafeIfAutoescape(node)
if variables:
node = nodes.Mod(node, nodes.Dict([
nodes.Pair(nodes.Const(key), value)
for key, value in variables.items()
]))
return nodes.Output([node])
class ExprStmtExtension(Extension):
"""Adds a `do` tag to Jinja2 that works like the print statement just
that it doesn't print the return value.
"""
tags = set(['do'])
def parse(self, parser):
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
node.node = parser.parse_tuple()
return node
class LoopControlExtension(Extension):
"""Adds break and continue to the template engine."""
tags = set(['break', 'continue'])
def parse(self, parser):
token = next(parser.stream)
if token.value == 'break':
return nodes.Break(lineno=token.lineno)
return nodes.Continue(lineno=token.lineno)
class WithExtension(Extension):
"""Adds support for a django-like with block."""
tags = set(['with'])
def parse(self, parser):
node = nodes.Scope(lineno=next(parser.stream).lineno)
assignments = []
while parser.stream.current.type != 'block_end':
lineno = parser.stream.current.lineno
if assignments:
parser.stream.expect('comma')
target = parser.parse_assign_target()
parser.stream.expect('assign')
expr = parser.parse_expression()
assignments.append(nodes.Assign(target, expr, lineno=lineno))
node.body = assignments + \
list(parser.parse_statements(('name:endwith',),
drop_needle=True))
return node
class AutoEscapeExtension(Extension):
"""Changes auto escape rules for a scope."""
tags = set(['autoescape'])
def parse(self, parser):
node = nodes.ScopedEvalContextModifier(lineno=next(parser.stream).lineno)
node.options = [
nodes.Keyword('autoescape', parser.parse_expression())
]
node.body = parser.parse_statements(('name:endautoescape',),
drop_needle=True)
return nodes.Scope([node])
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
babel_style=True):
"""Extract localizable strings from the given template node. Per
default this function returns matches in babel style that means non string
parameters as well as keyword arguments are returned as `None`. This
allows Babel to figure out what you really meant if you are using
gettext functions that allow keyword arguments for placeholder expansion.
If you don't want that behavior set the `babel_style` parameter to `False`
which causes only strings to be returned and parameters are always stored
in tuples. As a consequence invalid gettext calls (calls without a single
string parameter or string parameters after non-string parameters) are
skipped.
This example explains the behavior:
>>> from jinja2 import Environment
>>> env = Environment()
>>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
>>> list(extract_from_ast(node))
[(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
>>> list(extract_from_ast(node, babel_style=False))
[(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
For every string found this function yields a ``(lineno, function,
message)`` tuple, where:
* ``lineno`` is the number of the line on which the string was found,
* ``function`` is the name of the ``gettext`` function used (if the
string was extracted from embedded Python code), and
* ``message`` is the string itself (a ``unicode`` object, or a tuple
of ``unicode`` objects for functions with multiple string arguments).
This extraction function operates on the AST and is because of that unable
to extract any comments. For comment support you have to use the babel
extraction interface or extract comments yourself.
"""
for node in node.find_all(nodes.Call):
if not isinstance(node.node, nodes.Name) or \
node.node.name not in gettext_functions:
continue
strings = []
for arg in node.args:
if isinstance(arg, nodes.Const) and \
isinstance(arg.value, string_types):
strings.append(arg.value)
else:
strings.append(None)
for arg in node.kwargs:
strings.append(None)
if node.dyn_args is not None:
strings.append(None)
if node.dyn_kwargs is not None:
strings.append(None)
if not babel_style:
strings = tuple(x for x in strings if x is not None)
if not strings:
continue
else:
if len(strings) == 1:
strings = strings[0]
else:
strings = tuple(strings)
yield node.lineno, node.node.name, strings
class _CommentFinder(object):
"""Helper class to find comments in a token stream. Can only
find comments for gettext calls forwards. Once the comment
from line 4 is found, a comment for line 1 will not return a
usable value.
"""
def __init__(self, tokens, comment_tags):
self.tokens = tokens
self.comment_tags = comment_tags
self.offset = 0
self.last_lineno = 0
def find_backwards(self, offset):
try:
for _, token_type, token_value in \
reversed(self.tokens[self.offset:offset]):
if token_type in ('comment', 'linecomment'):
try:
prefix, comment = token_value.split(None, 1)
except ValueError:
continue
if prefix in self.comment_tags:
return [comment.rstrip()]
return []
finally:
self.offset = offset
def find_comments(self, lineno):
if not self.comment_tags or self.last_lineno > lineno:
return []
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
if token_lineno > lineno:
return self.find_backwards(self.offset + idx)
return self.find_backwards(len(self.tokens))
def babel_extract(fileobj, keywords, comment_tags, options):
"""Babel extraction method for Jinja templates.
.. versionchanged:: 2.3
Basic support for translation comments was added. If `comment_tags`
is now set to a list of keywords for extraction, the extractor will
    try to find the best preceding comment that begins with one of the
keywords. For best results, make sure to not have more than one
gettext call in one line of code and the matching comment in the
same line or the line before.
.. versionchanged:: 2.5.1
The `newstyle_gettext` flag can be set to `True` to enable newstyle
gettext calls.
.. versionchanged:: 2.7
A `silent` option can now be provided. If set to `False` template
syntax errors are propagated instead of being ignored.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results.
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
(comments will be empty currently)
"""
extensions = set()
for extension in options.get('extensions', '').split(','):
extension = extension.strip()
if not extension:
continue
extensions.add(import_string(extension))
if InternationalizationExtension not in extensions:
extensions.add(InternationalizationExtension)
def getbool(options, key, default=False):
return options.get(key, str(default)).lower() in \
('1', 'on', 'yes', 'true')
silent = getbool(options, 'silent', True)
environment = Environment(
options.get('block_start_string', BLOCK_START_STRING),
options.get('block_end_string', BLOCK_END_STRING),
options.get('variable_start_string', VARIABLE_START_STRING),
options.get('variable_end_string', VARIABLE_END_STRING),
options.get('comment_start_string', COMMENT_START_STRING),
options.get('comment_end_string', COMMENT_END_STRING),
options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
getbool(options, 'trim_blocks', TRIM_BLOCKS),
getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
NEWLINE_SEQUENCE,
getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
frozenset(extensions),
cache_size=0,
auto_reload=False
)
if getbool(options, 'newstyle_gettext'):
environment.newstyle_gettext = True
source = fileobj.read().decode(options.get('encoding', 'utf-8'))
try:
node = environment.parse(source)
tokens = list(environment.lex(environment.preprocess(source)))
except TemplateSyntaxError as e:
if not silent:
raise
# skip templates with syntax errors
return
finder = _CommentFinder(tokens, comment_tags)
for lineno, func, message in extract_from_ast(node, keywords):
yield lineno, func, message, finder.find_comments(lineno)
#: nicer import names
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
with_ = WithExtension
autoescape = AutoEscapeExtension
|
[
"dahlstrom@google.com"
] |
dahlstrom@google.com
|
b7ac6139378ae8a7c41afcd76fec64e7b8ee9df7
|
c60f7950f87c66e044c19c7ecdd61ce9a6cb961a
|
/views/__init__.py
|
0d7316e78843cd6be0d8c911a0ebb2761cd2b34a
|
[] |
no_license
|
yifan-blog/webpy-demo
|
fb4d37b3273a5b9076c3466c3c8854c02ee022c8
|
4c4a2bf5a372284f38d6d47d065cb8c121662c82
|
refs/heads/master
| 2020-04-17T11:26:10.482684
| 2018-10-20T12:11:07
| 2018-10-20T12:11:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
# -*- coding: utf-8 -*-
import web
# Used for rendering HTML templates
render = web.template.render('views/', cache = False) # disable template caching
# Add a global to the template rendering environment so templates can be nested in one another
render._add_global(render, 'render')
|
[
"owenliang1990@gmail.com"
] |
owenliang1990@gmail.com
|
befc484720fc8b2dd7e72ad047976b06972d3b9b
|
05526f4941be395c0d41b4eed16ede6ae45b5e3a
|
/src/ossify/tokenizer.py
|
902357cd116b2634aebe40f9a5b4c593ae199181
|
[
"BSD-3-Clause"
] |
permissive
|
ndevenish/ossify
|
940606d75932597aedac2e0377f1f18ee249a126
|
5045b0cee309093ad2ccf35a4b1d92c4bb2783f3
|
refs/heads/master
| 2022-07-14T19:58:55.616828
| 2020-05-09T00:28:19
| 2020-05-09T00:28:19
| 261,609,497
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,238
|
py
|
import io
import re
import sys
import token
import tokenize
from token import tok_name
from tokenize import TokenInfo
from typing import Iterator, List
numchars = "0123456789"
reNamelike = re.compile(r"[A-Za-z_]")
reWhitespace = re.compile("[ \t\n]+")
reName = re.compile(r"[A-Za-z_]\w*")
reStringStart = re.compile(r'"""|"|\'\'\'|\'')
def read_number(data):
if "3" in data:
print(data)
# Cheat for now
# -1 because will always be a newline, but sometimes that newline
# is an escaped newline
s = io.StringIO(data[:-1])
toke = next(tokenize.generate_tokens(s.readline))
if toke.type == token.NUMBER:
return toke.string
return False
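# e.g. read_number("42\n") -> "42"; read_number("abc\n") -> False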
def character_generator(file_interface, encoding="utf-8", verbose=False):
raw_data = file_interface.read()
try:
data = raw_data.decode(encoding)
except AttributeError:
data = raw_data
pos, maxlen = 0, len(data)
line_start, line_end = 0, 0
line_no = 0
while pos < maxlen:
# if pos > 3050:
# return
_previous_pos = pos
if line_end <= pos:
line_no += 1
# work out the line end for line-slicing
line_start = line_end
line_end = data.find("\n", pos) + 1
if line_end == 0:
line_end = maxlen
line = data[line_start:line_end]
line_remaining = data[pos:line_end]
# print(line)
if verbose:
print(
"Processing line: \033[37m"
+ repr(
data[line_start:pos] + "_e_[30;1m|_e_[0m" + data[pos:line_end]
).replace("_e_", "\033")
)
if data[pos] == "\\" and not (pos + 1) == maxlen and data[pos + 1] == "\n":
# Handle swallowing escaped newlines
if verbose:
print("Escaped newline")
pos += 2
line_no += 1
elif match := reWhitespace.match(data, pos=pos):
newlines = match.group().count("\n")
if "\n" in match.group():
yield TokenInfo(
type=token.NEWLINE,
string="\n",
start=(line_no, pos),
end=(line_no + newlines, match.end()),
line=line,
)
else:
yield TokenInfo(
type=token.OP,
string=" ",
start=(line_no, pos),
end=(line_no, match.end()),
line=line,
)
pos = match.end()
line_no += newlines
# elif data[pos] == "\t":
# if verbose:
# print(f"{pos}: Tab (sent space)")
# yield TokenInfo(
# type=token.OP,
# string=" ",
# start=(line_no, pos),
# end=(line_no, pos),
# line=line,
# )
# pos += 1
elif data[pos] == "\n":
if verbose:
print(f"{pos}: NEWLINE")
pos += 1
yield TokenInfo(
type=token.NEWLINE,
string="\n",
start=(line_no, pos - 1),
end=(line_no, pos),
line=line,
)
elif (string := reStringStart.match(data, pos=pos)) and (
pos == 0 or data[pos - 1] in " \n\t{}="
):
quote_type = string.group()
end_pattern = r"(?<!\\)" + quote_type
re_endquote = re.compile(end_pattern, re.M | re.S)
end_match = re_endquote.search(data, pos=pos + len(quote_type))
assert end_match, "Unterminated string"
contents = data[
string.start() + len(quote_type) : end_match.end() - len(quote_type)
]
start_l = line_no
line_no += contents.count("\n")
# Found the start of some string
# data.find(quote_type, pos=pos+len(string))
if verbose:
print(f"STRING: {contents!r}")
full_str = quote_type + contents + quote_type
yield TokenInfo(
type=token.STRING,
string=full_str,
start=(start_l, pos),
end=(line_no + 1, pos + len(full_str)),
line="",
)
pos = end_match.end()
elif name := reName.match(data, pos=pos):
if verbose:
print(f"{pos}: NAME {name.group()}")
yield TokenInfo(
type=token.NAME,
string=name.group(),
start=(line_no, name.start()),
end=(line_no, name.end()),
line=line,
)
pos += len(name.group())
elif data[pos] in "0123456789":
yield TokenInfo(
type=token.NUMBER,
string=data[pos],
start=(line_no, pos),
                end=(line_no, pos + 1),
line=line,
)
pos += 1
else:
if verbose:
print(f"OP: {data[pos]}")
yield TokenInfo(
type=token.OP,
string=data[pos],
start=(line_no, pos),
end=(line_no, pos + 1),
line=line,
)
# print("Something else?")
pos += 1
assert pos != _previous_pos, "Didn't advance position"
yield TokenInfo(
type=token.NEWLINE,
string="\n",
start=(line_no, pos),
end=(line_no, pos),
line="",
)
yield TokenInfo(
type=token.ENDMARKER,
string="",
start=(line_no, pos + 1),
end=(line_no, pos + 1),
line="",
)
return None
def simple_generator(file_interface, encoding="utf-8", verbose=True):
#
# needcont: Currently processing a continuing string
# contstr: The string currently being built
# endprog: The match condition for ending a continuing string
raw_data = file_interface.read()
try:
data = raw_data.decode(encoding)
except AttributeError:
data = raw_data
# last_line = b""
# line = b""
# line_no = 0
# while True:
# try:
# last_line = line
# line = file_interface()
# except StopIteration:
# line = b""
# if encoding is not None:
# line = line.decode(encoding)
# line_no += 1
# pos, max = 0, len(line)
pos, maxlen = 0, len(data)
line_start, line_end = 0, 0
line_no = 0
while pos < maxlen:
# if pos > 3050:
# return
_previous_pos = pos
if line_end <= pos:
line_no += 1
# work out the line end for line-slicing
line_start = line_end
line_end = data.find("\n", pos) + 1
if line_end == 0:
line_end = maxlen
line = data[line_start:line_end]
line_remaining = data[pos:line_end]
if verbose:
print(
"Processing line: \033[37m"
+ repr(
data[line_start:pos] + "_e_[30;1m|_e_[0m" + data[pos:line_end]
).replace("_e_", "\033")
)
if match := reWhitespace.match(line_remaining):
# Skip whitespace
pos += match.end()
elif data[pos] == "\\" and not (pos + 1) == maxlen and data[pos + 1] == "\n":
# Handle swallowing escaped newlines
if verbose:
print("Escaped newline")
pos += 2
elif data[pos] == "\n":
if verbose:
print(f"NEWLINE")
pos += 1
yield TokenInfo(
type=token.NEWLINE,
string="\n",
start=(line_no, pos - 1),
end=(line_no, pos),
line=line,
)
elif match := reName.match(line_remaining):
if verbose:
print(f"NAME: {match.group(0)}")
pos += match.end()
yield TokenInfo(
type=token.NAME,
string=match.group(0),
start=(line_no, match.start()),
end=(line_no, match.end()),
line=line,
)
elif data[pos] == "#":
pos = line_end
elif number := read_number(line_remaining):
if verbose:
print(f"NUMBER: {number}")
yield TokenInfo(
type=token.NUMBER,
string=number,
start=(line_no, pos),
end=(line_no, pos + len(number)),
line=line,
)
pos += len(number)
elif string := reStringStart.match(data, pos=pos):
quote_type = string.group()
end_pattern = r"(?<!\\)" + quote_type
re_endquote = re.compile(end_pattern, re.M | re.S)
end_match = re_endquote.search(data, pos=pos + len(quote_type))
assert end_match, "Unterminated string"
contents = data[
string.start() + len(quote_type) : end_match.end() - len(quote_type)
]
# Found the start of some string
# data.find(quote_type, pos=pos+len(string))
if verbose:
print(f"STRING: {contents!r}")
pos = end_match.end()
else:
if verbose:
print(f"CHAR: {data[pos]}")
yield TokenInfo(
type=token.OP,
string=data[pos],
start=(line_no, pos),
end=(line_no, pos + 1),
line=line,
)
# print("Something else?")
pos += 1
assert pos != _previous_pos, "Didn't advance position"
    return TokenInfo(type=token.ENDMARKER, string="", start=(line_no, pos), end=(line_no, pos), line="")
Mark = int # NewType('Mark', int)
exact_token_types = token.EXACT_TOKEN_TYPES # type: ignore
def shorttok(tok: tokenize.TokenInfo) -> str:
return (
"%-25.25s"
% f"{tok.start[0]}.{tok.start[1]}: {token.tok_name[tok.type]}:{tok.string!r}"
)
class Tokenizer:
"""Caching wrapper for the tokenize module.
This is pretty tied to Python's syntax.
"""
_tokens: List[tokenize.TokenInfo]
def __init__(
self, tokengen: Iterator[tokenize.TokenInfo], *, verbose: bool = False
):
self._tokengen = tokengen
self._tokens = []
self._index = 0
self._verbose = verbose
if verbose:
self.report(False, False)
def getnext(self) -> tokenize.TokenInfo:
"""Return the next token and updates the index."""
cached = True
while self._index == len(self._tokens):
tok = next(self._tokengen)
if tok.type in (tokenize.COMMENT, tokenize.INDENT, tokenize.DEDENT,):
continue
# Transform NL to NEWLINE
if tok.type == token.NL:
tok = tokenize.TokenInfo(
token.NEWLINE,
tok.string,
start=tok.start,
end=tok.end,
line=tok.line,
)
if tok.type == token.ERRORTOKEN and tok.string.isspace():
continue
self._tokens.append(tok)
cached = False
tok = self._tokens[self._index]
self._index += 1
if self._verbose:
self.report(cached, False)
return tok
def peek(self) -> tokenize.TokenInfo:
"""Return the next token *without* updating the index."""
while self._index == len(self._tokens):
tok = next(self._tokengen)
if tok.type in (tokenize.COMMENT, tokenize.INDENT, tokenize.DEDENT,):
continue
# Transform NL to NEWLINE
if tok.type == token.NL:
tok = tokenize.TokenInfo(
token.NEWLINE,
tok.string,
start=tok.start,
end=tok.end,
line=tok.line,
)
if tok.type == token.ERRORTOKEN and tok.string.isspace():
continue
self._tokens.append(tok)
return self._tokens[self._index]
def diagnose(self) -> tokenize.TokenInfo:
if not self._tokens:
self.getnext()
return self._tokens[-1]
def mark(self) -> Mark:
return self._index
def reset(self, index: Mark) -> None:
if index == self._index:
return
assert 0 <= index <= len(self._tokens), (index, len(self._tokens))
old_index = self._index
self._index = index
if self._verbose:
self.report(True, index < old_index)
def report(self, cached: bool, back: bool) -> None:
if back:
fill = "-" * self._index + "-"
elif cached:
fill = "-" * self._index + ">"
else:
fill = "-" * self._index + "*"
if self._index == 0:
print(f"{fill} (Bof)")
else:
tok = self._tokens[self._index - 1]
print(f"{fill} {shorttok(tok)}")
def main():
import argparse
# Helper error handling routines
def perror(message):
sys.stderr.write(message)
sys.stderr.write("\n")
def error(message, filename=None, location=None):
if location:
args = (filename,) + location + (message,)
perror("%s:%d:%d: error: %s" % args)
elif filename:
perror("%s: error: %s" % (filename, message))
else:
perror("error: %s" % message)
sys.exit(1)
# Parse the arguments and options
parser = argparse.ArgumentParser(prog="python -m tokenize")
parser.add_argument(
dest="filename",
nargs="?",
metavar="filename.py",
help="the file to tokenize; defaults to stdin",
)
parser.add_argument(
"-e",
"--exact",
dest="exact",
action="store_true",
help="display token names using the exact type",
)
args = parser.parse_args()
try:
# Tokenize the input
if args.filename:
filename = args.filename
with open(filename, "r") as f:
tokens = list(character_generator(f))
else:
filename = "<stdin>"
tokens = character_generator(sys.stdin, None)
# Output the tokenization
for token in tokens:
token_type = token.type
if args.exact:
token_type = token.exact_type
token_range = "%d,%d-%d,%d:" % (token.start + token.end)
print("%-20s%-15s%-15r" % (token_range, tok_name[token_type], token.string))
except SyntaxError as err:
error(err, filename)
except OSError as err:
error(err)
except KeyboardInterrupt:
print("interrupted\n")
except Exception as err:
perror("unexpected error: %s" % err)
raise
if __name__ == "__main__":
main()
|
[
"ndevenish@gmail.com"
] |
ndevenish@gmail.com
|
5d607d29fe1245777fd78a219f24a7700613df36
|
8b23c6fe56daa648735cbbaa6e658a3ab6cf6899
|
/tests/test_multibase.py
|
ba301734aac857eb63847f3fa072722e19607dad
|
[
"MIT"
] |
permissive
|
JuFil/py-multibase
|
c229e7974c1372ced114ecccf1ae2ac1d69add7e
|
cb8770cc04e445b852d56f6e9e710618f7f83c77
|
refs/heads/master
| 2020-03-27T00:20:41.787840
| 2017-09-15T13:39:27
| 2017-09-15T13:39:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,220
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `multibase` package."""
import pytest
from morphys import ensure_bytes
from multibase import encode, decode, is_encoded
TEST_FIXTURES = (
('identity', 'yes mani !', '\x00yes mani !'),
('base2', 'yes mani !', '01111001011001010111001100100000011011010110000101101110011010010010000000100001'),
('base8', 'yes mani !', '7171312714403326055632220041'),
('base10', 'yes mani !', '9573277761329450583662625'),
('base16', 'yes mani !', 'f796573206d616e692021'),
('base16', '\x01', 'f01'),
('base16', '\x0f', 'f0f'),
('base32hex', 'yes mani !', 'vf5in683dc5n6i811'),
('base32', 'yes mani !', 'bpfsxgidnmfxgsibb'),
('base32z', 'yes mani !', 'hxf1zgedpcfzg1ebb'),
('base58flickr', 'yes mani !', 'Z7Pznk19XTTzBtx'),
('base58btc', 'yes mani !', 'z7paNL19xttacUY'),
('base64', '÷ïÿ', 'mw7fDr8O/'),
('base64url', '÷ïÿ', 'uw7fDr8O_'),
)
INCORRECT_ENCODINGS = ('base58', 'base4')
INCORRECT_ENCODED_DATA = ('abcdefghi', '!qweqweqeqw')
@pytest.mark.parametrize('encoding,data,encoded_data', TEST_FIXTURES)
def test_encode(encoding, data, encoded_data):
assert encode(encoding, data) == ensure_bytes(encoded_data)
@pytest.mark.parametrize('encoding', INCORRECT_ENCODINGS)
def test_encode_incorrect_encoding(encoding):
with pytest.raises(ValueError) as excinfo:
encode(encoding, 'test data')
assert 'not supported' in str(excinfo.value)
@pytest.mark.parametrize('_,data,encoded_data', TEST_FIXTURES)
def test_decode(_, data, encoded_data):
assert decode(encoded_data) == ensure_bytes(data)
@pytest.mark.parametrize('encoded_data', INCORRECT_ENCODED_DATA)
def test_decode_incorrect_encoding(encoded_data):
with pytest.raises(ValueError) as excinfo:
decode(encoded_data)
assert 'Can not determine encoding' in str(excinfo.value)
@pytest.mark.parametrize('_,data,encoded_data', TEST_FIXTURES)
def test_is_encoded(_, data, encoded_data):
assert is_encoded(encoded_data)
assert not is_encoded(data)
@pytest.mark.parametrize('encoded_data', INCORRECT_ENCODED_DATA)
def test_is_encoded_incorrect_encoding(encoded_data):
assert not is_encoded(encoded_data)
|
[
"dhruvbaldawa@gmail.com"
] |
dhruvbaldawa@gmail.com
|
84e0d64f73b6e5380c0421c9450b2d466891c522
|
3b73465e112dba0ab729bbf7d5bf6576882fd2b8
|
/dataloaders/MRI_dataset.py
|
2bbd30915d6c491030681249a87343b354310dbd
|
[] |
no_license
|
Wugengxian/MRI
|
65777cc74d420a57d1befd330a60cfb2696b1235
|
560cdf590659688ea260063dd66eab84cdd2b981
|
refs/heads/main
| 2023-05-07T23:35:15.575761
| 2021-05-26T23:03:45
| 2021-05-26T23:03:45
| 365,596,833
| 3
| 1
| null | 2021-05-11T13:55:53
| 2021-05-08T19:36:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,529
|
py
|
from torch.utils.data import Dataset
import numpy as np
from numpy import random
from torchvision.transforms import transforms
import dataloaders.custom_transforms as tr
from torch.utils.data import DataLoader
class MRI_dataset(Dataset):
def __init__(self):
super().__init__()
self.md = np.load("dataloaders/md_train.npy")
self.fa = np.load("dataloaders/fa_train.npy")
self.mask = np.load("dataloaders/mask_train.npy")
self.label = np.load("dataloaders/label_train.npy")
self.real_length = self.label.shape[0]
self.zeros = np.where(self.label == 0)
self.oversampled_length = self.real_length + 3 * len(self.zeros[0])
def __len__(self):
return self.oversampled_length
def __getitem__(self, index):
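        # Indices at or beyond real_length wrap onto the zero-labelled
        # (minority) rows, so each such sample is served 4x per epoch:
        # once in order plus three oversampled repeats.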
real_index = index % self.oversampled_length
        if real_index >= self.real_length:
            real_index = self.zeros[0][(real_index - self.real_length) % len(self.zeros[0])]
md, fa, mask, label = self.md[real_index], self.fa[real_index], self.mask[real_index], self.label[real_index]
sample = {'md': md, 'fa': fa, "mask": mask, "label": label}
return self.transform_tr(sample)
def transform_tr(self, sample, tr_type:int = 0):
composed_transforms = transforms.Compose([tr.RamdomMask(), tr.RamdomFlip(), tr.RamdomRotate(), tr.ToTensor()])
return composed_transforms(sample)
if __name__ == "__main__":
data = DataLoader(MRI_dataset(), batch_size=1, shuffle=True, drop_last=False)
pass
|
[
"60342704+Wugengxian@users.noreply.github.com"
] |
60342704+Wugengxian@users.noreply.github.com
|
af6f406c0e717181141f93fbe7e46f67df5c2931
|
2b18daffd0159e5bfb262626f6091239733a7d7e
|
/PGMF/script/focalisoseq.py
|
bb5623b706357028bd22a0f9ae71cd161a9258cc
|
[
"MIT"
] |
permissive
|
haiwangyang/PGMF
|
df848b824303c8014686b2829c60acaae979629d
|
bb6b0db8004e552607ac5d740271bbf0e7ab5f5c
|
refs/heads/master
| 2021-09-15T23:43:46.402598
| 2018-06-12T21:33:06
| 2018-06-12T21:33:06
| 115,026,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,390
|
py
|
#!/usr/bin/env python
"""
Purpose:
Handling pacbio isoseq
"""
import sharedinfo
import re
import focalintersect
from pyfaidx import Fasta
import pysam
from collections import Counter
from sharedinfo import exist_file, get_lines
def summarize_polyA(fasta):
""" summarize polyA type AAAAAAAAA or TTTTTTTTTT or others """
lst = []
for name in fasta.keys():
seq = str(fasta[name])
L = seq[0:10]
R = seq[-10:]
end = L + R
most_common_char = Counter(end).most_common(1)[0][0]
Ln = Counter(L)[most_common_char]
Rn = Counter(R)[most_common_char]
if Ln > Rn:
m = re.search('^(' + most_common_char + '+)', seq)
if m:
lst.append(["L", most_common_char, m.group(1), name, seq])
else:
lst.append(["L", most_common_char, "-", name, seq])
else:
m = re.search('(' + most_common_char + '+)$', seq)
if m:
lst.append(["R", most_common_char, m.group(1), name, seq])
else:
lst.append(["R", most_common_char, "-", name, seq])
    return lst
def printout_polyA_summary(sample):
""" analyze the list of polyA
"""
species, sex, tissue, replicate = sample.split("_")
fi = FocalIsoseq(species, sex, tissue, replicate)
for i in fi.polyA:
if i[1] == 'A' or i[1] == 'T':
print("pass", sample, i[0], i[1], len(i[2]))
else:
print("fail", sample, i[0], i[1], len(i[2]))
def seq2polyA(seq):
""" input seq
output polyA report
(1) pass or not
(2) Left or Right
(3) most common character
(4) length of polyA
(5) length of isoseq
"""
lst = []
L = seq[0:10]
R = seq[-10:]
end = L + R
most_common_char = Counter(end).most_common(1)[0][0]
Ln = Counter(L)[most_common_char]
Rn = Counter(R)[most_common_char]
if Ln > Rn:
m = re.search('^(' + most_common_char + '+)', seq)
if m:
lst = ["L", most_common_char, m.group(1)]
else:
lst = ["L", most_common_char, "-"]
else:
m = re.search('(' + most_common_char + '+)$', seq)
if m:
lst = ["R", most_common_char, m.group(1)]
else:
lst = ["R", most_common_char, "-"]
lst2 = []
if lst[1] == 'A' or lst[1] == 'T':
lst2 = ["pass", lst[0], lst[1], str(len(lst[2])), str(len(seq))]
else:
lst2 = ["fail", lst[0], lst[1], str(len(lst[2])), str(len(seq))]
return lst2
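# Worked example (illustrative): a read with a 10-base leading polyA tract,
# e.g. seq2polyA("AAAAAAAAAA" + "GCGTACGTAC"), returns ["pass", "L", "A", "10", "20"]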
def printout_polyA_len_for_sample(sample):
""" print out polyA len and isoseq len for different jaccard type """
species, sex, tissue, replicate = sample.split("_")
fi = FocalIsoseq(species, sex, tissue, replicate)
for isoseqid in fi.jaccard_zero_isoseqid:
seq = str(fi.fasta[isoseqid])
intronnum = fi.isoseqid2intronnum[isoseqid]
print("jaccard==0" + "\t" + str(intronnum) + "\t" + "\t".join(seq2polyA(seq)))
for isoseqid in fi.jaccard_plus_isoseqid:
seq = str(fi.fasta[isoseqid])
intronnum = fi.isoseqid2intronnum[isoseqid]
print("jaccard>0" + "\t" + str(intronnum) + "\t" + "\t".join(seq2polyA(seq)))
for isoseqid in fi.unmapped_isoseqid:
seq = str(fi.fasta[isoseqid])
print("unmapped" + "\t" + "NA" + "\t" + "\t".join(seq2polyA(seq)))
def generate_jaccard_type_isoseqid_bed_for_sample(sample):
""" print out isoseq bed for different jaccard type """
species, sex, tissue, replicate = sample.split("_")
fi = FocalIsoseq(species, sex, tissue, replicate)
with open("../data/pacbio/jaccard_zero." + sample + ".bed", "w") as f:
for isoseqid in fi.jaccard_zero_isoseqid:
f.write(fi.isoseqid2bambedline[isoseqid])
with open("../data/pacbio/jaccard_plus." + sample + ".bed", "w") as f:
for isoseqid in fi.jaccard_plus_isoseqid:
f.write(fi.isoseqid2bambedline[isoseqid])
def print_unique_fasta_number():
""" calculate unique fasta, and remove redundancy """
st = set()
dgri_samples = ["dgri_f_wb_r1", "dgri_m_wb_r1", "dgri_f_wb_r2", "dgri_m_wb_r2"]
dmel_samples = ["dmel_f_go_r1", "dmel_m_go_r1", "dmel_f_wb_r1", "dmel_m_wb_r1"]
samples = {"dgri":dgri_samples, "dmel":dmel_samples}
for dxxx in ['dmel', 'dgri']:
st = set()
for sample in samples[dxxx]:
fasta = Fasta("../data/pacbio/" + sample + ".fasta")
for name in fasta.keys():
seq = fasta[name]
st.add(seq)
print(len(st))
class FocalIsoseq:
"""FocalIsoseq object"""
def __init__(self, species, sex, tissue, replicate):
self.species = species
self.sex = sex
self.tissue = tissue
self.replicate = replicate
self.sample = species + "_" + sex + "_" + tissue + "_" + replicate
self.fasta = Fasta("../data/pacbio/" + self.sample + ".fasta")
self.bam = pysam.AlignmentFile("../data/pacbio/" + self.sample + ".bam", "rb")
self.polyA = summarize_polyA(self.fasta)
self.all_isoseqid = set(self.fasta.keys())
self.mapped_isoseqid = self.get_mapped_isoseqid()
self.unmapped_isoseqid = self.all_isoseqid - self.mapped_isoseqid
self.jaccard_plus_isoseqid = self.get_jaccard_plus_isoseqid()
self.jaccard_zero_isoseqid = self.mapped_isoseqid - self.jaccard_plus_isoseqid
self.isoseqid2bambedline = self.get_isoseqid2bambedline()
self.isoseqid2intronnum = self.get_isoseqid2intronnum()
self.isoseqid2position = self.get_isoseqid2position()
def get_jaccard_plus_isoseqid(self):
""" get jaccard > 0 isoseqid """
lines = get_lines("../data/output", self.species + "_" + self.sex + "_" + self.tissue + "_" + self.replicate + ".B.txt")
st = set()
for line in lines:
(position_isoseqid, transid, intersection, union, jaccard) = line.rstrip().split("\t")
position, isoseqid = position_isoseqid.split(".")
if float(jaccard) > 0:
st.add(isoseqid)
return st
def get_mapped_isoseqid(self):
"""
return isoseqid in bam file
"""
st = set()
for read in self.bam:
isoseqid = read.qname
st.add(isoseqid)
return st
def get_isoseqid2bambedline(self):
lines = get_lines("../data/pacbio", self.sample + ".bam.bed")
dct = dict()
for line in lines:
(chrom, chromStart, chromEnd, position_isoseqid, score, strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts) = line.rstrip().split("\t")
position, isoseqid = position_isoseqid.split(".")
dct[isoseqid] = line
return dct
def get_isoseqid2position(self):
lines = get_lines("../data/pacbio", self.sample + ".bam.bed")
dct = dict()
for line in lines:
(chrom, chromStart, chromEnd, position_isoseqid, score, strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts) = line.rstrip().split("\t")
position, isoseqid = position_isoseqid.split(".")
dct[isoseqid] = position
return dct
def get_isoseqid2intronnum(self):
""" get intron information from bam.bed """
lines = get_lines("../data/pacbio", self.sample + ".bam.bed")
dct = dict()
for line in lines:
(chrom, chromStart, chromEnd, position_isoseqid, score, strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts) = line.rstrip().split("\t")
position, isoseqid = position_isoseqid.split(".")
exons = focalintersect.get_exons(int(chromStart), int(chromEnd), blockSizes, blockStarts)
num_exons = len(exons)
dct[isoseqid] = num_exons - 1
return dct
def main():
#""" printout polyA len """
#for sample in sharedinfo.pacbio_sample:
# printout_polyA_summary(sample)
""" check len distribution of polyA and isoseq """
# printout_polyA_len_for_sample("dmel_m_go_r1")
for sample in sharedinfo.pacbio_sample:
generate_jaccard_type_isoseqid_bed_for_sample(sample)
if __name__ == '__main__':
# main()
# fi = FocalIsoseq("dgri", "f", "wb", "r1")
print_unique_fasta_number()
|
[
"haiwangyang@gmail.com"
] |
haiwangyang@gmail.com
|
f5af7130c2c42bf1d02b6701385b69e55840ac6e
|
1e703590bec8d2bb82045123ffc701cf0ca2a63e
|
/recipe_db/recipe_db_app/views.py
|
b71f3c26f57a55c471939841be11d7af3a98b7db
|
[] |
no_license
|
mjrobbins18/Recipe_DB_Back
|
1e1331b05a71716f9a77798fcfdcdd1e52021db0
|
5102d1bbc2af0d3a82ea56bfc3b690aa05e92753
|
refs/heads/main
| 2023-08-20T13:31:52.087773
| 2021-10-26T16:17:19
| 2021-10-26T16:17:19
| 409,214,236
| 1
| 0
| null | 2021-09-29T18:57:53
| 2021-09-22T13:24:22
|
CSS
|
UTF-8
|
Python
| false
| false
| 7,797
|
py
|
from django.shortcuts import render
from rest_framework import generics, status, permissions
from rest_framework_simplejwt.views import TokenObtainPairView
from rest_framework.parsers import MultiPartParser, FormParser
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_simplejwt.tokens import RefreshToken
from rest_framework.permissions import IsAuthenticated
from django.db.models import Q
from .serializers import RecipeTitleCreateSerializer, MyTokenObtainPairSerializer, CustomUserSerializer, RecipeBodyCreateSerializer, RecipeViewSerializer, User, IngredientSerializer, EquipmentSerializer, ProcedureSerializer, CommentSerializer, PostSerializer
from .models import Recipe, Ingredient, Procedure, Equipment, Post, Comment, RecipeBody
# Delete, update, or show a single recipe
class RecipeDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Recipe.objects.all()
serializer_class = RecipeViewSerializer
permission_classes = (permissions.AllowAny,)
authentication_classes = ()
# List Recipe
class RecipeList(generics.ListCreateAPIView):
queryset = Recipe.objects.all()[:12]
serializer_class = RecipeViewSerializer
permission_classes = (permissions.AllowAny,)
authentication_classes = ()
# get recipes by username
class RecipeListByUser(generics.ListCreateAPIView):
queryset = Recipe.objects.all()
serializer_class = RecipeViewSerializer
permission_classes = (permissions.AllowAny,)
authentication_classes = ()
def get_queryset(self):
user = self.kwargs['user']
return Recipe.objects.filter(user=user)
# get recipes by title
class RecipeListByTitle(generics.ListCreateAPIView):
queryset = Recipe.objects.all()
serializer_class = RecipeViewSerializer
permission_classes = (permissions.AllowAny,)
authentication_classes = ()
def get_queryset(self):
title = self.kwargs['title']
return Recipe.objects.filter(title=title)
# recipe search
class RecipeSearch(generics.ListCreateAPIView):
serializer_class = RecipeViewSerializer
def get_queryset(self):
query = self.request.GET.get('q')
object_list = Recipe.objects.filter(Q(title__icontains=query))[:10]
return object_list
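# e.g. GET .../search/?q=pasta would return up to 10 recipes whose title contains
# "pasta" (case-insensitive); the exact path depends on the project's urls.py.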
# Create Recipe Title
class RecipeTitleCreate(generics.ListCreateAPIView):
queryset = Recipe.objects.all()
serializer_class = RecipeTitleCreateSerializer
permission_classes = (permissions.AllowAny,)
authentication_classes = ()
    def form_valid(self, form):
        form.instance.created_by = self.request.user
        return super(RecipeTitleCreate, self).form_valid(form)
#Create Recipe Body
class RecipeBodyCreate(generics.ListCreateAPIView):
queryset = RecipeBody.objects.all()
serializer_class = RecipeBodyCreateSerializer
permission_classes = (permissions.AllowAny,)
authentication_classes = ()
    def form_valid(self, form):
        form.instance.created_by = self.request.user
        return super(RecipeBodyCreate, self).form_valid(form)
# Delete, Update, Show, Recipe Body
class RecipeBodyDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = RecipeBody.objects.all()
serializer_class = RecipeBodyCreateSerializer
permission_classes = (permissions.AllowAny,)
authentication_classes = ()
    def form_valid(self, form):
        form.instance.created_by = self.request.user
        return super(RecipeBodyDetail, self).form_valid(form)
class ObtainTokenPairWithNameView(TokenObtainPairView):
permission_classes = (permissions.AllowAny,)
authentication_classes = ()
serializer_class = MyTokenObtainPairSerializer
class CustomUserCreate(APIView):
permission_classes = (permissions.AllowAny,)
authentication_classes = ()
def post(self, request, format='json'):
serializer = CustomUserSerializer(data=request.data)
if serializer.is_valid():
user = serializer.save()
if user:
json = serializer.data
return Response(json, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Logout
class LogoutAndBlacklistRefreshTokenForUserView(APIView):
permission_classes = (permissions.AllowAny,)
authentication_classes = ()
def post(self, request):
try:
refresh_token = request.data["refresh_token"]
token = RefreshToken(refresh_token)
token.blacklist()
return Response(status=status.HTTP_205_RESET_CONTENT)
except Exception as e:
return Response(status=status.HTTP_400_BAD_REQUEST)
# user views
class UserList(generics.ListCreateAPIView):
queryset = User.objects.all()
serializer_class = CustomUserSerializer
permission_classes = (permissions.AllowAny,)
authentication_classes = ()
class UserDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = User.objects.all()
serializer_class = CustomUserSerializer
lookup_field = 'username'
# Ingredient Views
class IngredientList(generics.ListCreateAPIView):
queryset = Ingredient.objects.all()
serializer_class = IngredientSerializer
permission_classes = (permissions.AllowAny,)
class IngredientDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Ingredient.objects.all()
serializer_class = IngredientSerializer
permission_classes = (permissions.AllowAny,)
# Procedure Views
class ProcedureList(generics.ListCreateAPIView):
queryset = Procedure.objects.all()
serializer_class = ProcedureSerializer
permission_classes = (permissions.AllowAny,)
class ProcedureDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Procedure.objects.all()
serializer_class = ProcedureSerializer
permission_classes = (permissions.AllowAny,)
# Equipment Views
class EquipmentList(generics.ListCreateAPIView):
queryset = Equipment.objects.all()
serializer_class = EquipmentSerializer
permission_classes = (permissions.AllowAny,)
class EquipmentDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Equipment.objects.all()
serializer_class = EquipmentSerializer
permission_classes = (permissions.AllowAny,)
# Comment Views
class CommentList(generics.ListCreateAPIView):
queryset = Comment.objects.all()
serializer_class = CommentSerializer
permission_classes = (permissions.AllowAny,)
authentication_classes = ()
class CommentDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Comment.objects.all()
serializer_class = CommentSerializer
permission_classes = (permissions.AllowAny,)
authentication_classes = ()
# Post Views
class PostList(generics.ListCreateAPIView):
queryset = Post.objects.all()
serializer_class = PostSerializer
permission_classes = (permissions.AllowAny,)
authentication_classes = ()
class PostListByRecipe(generics.ListCreateAPIView):
queryset = Post.objects.all()
serializer_class = PostSerializer
permission_classes = (permissions.AllowAny,)
authentication_classes = ()
# Get posts by recipe
def get_queryset(self):
recipe = self.kwargs['recipe']
return Post.objects.filter(recipe=recipe)
class PostDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Post.objects.all()
serializer_class = PostSerializer
permission_classes = (permissions.AllowAny,)
authentication_classes = ()
# Chat room test
def index(request):
return render(request, 'example/index.html')
def room(request, room_name):
return render(request, 'example/room.html', {
'room_name': room_name
})
|
[
"maxrobbins88@gmail.com"
] |
maxrobbins88@gmail.com
|
8f2480325079e70cc37f56576b090057722b838f
|
5800c906808d9c51eb48b2814e9c29ba4996908d
|
/TrazoDePoligonos_Rectangulo_MariaDelRosarioValentinMontiel_3601.py
|
b7b227a6e073aed6065804f5edf8c23537612d20
|
[] |
no_license
|
Rosario20-VM/Trazo-de-Poligonos
|
5326418720728ebc921ad51bc1451c331204dd35
|
aef602ffabd6d5bec2dede758003f75ea63978cb
|
refs/heads/master
| 2023-05-07T15:26:30.033676
| 2021-06-02T22:57:02
| 2021-06-02T22:57:02
| 371,850,960
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,661
|
py
|
#MARIA DEL ROSARIO VALENTIN MONTIEL
import matplotlib.pyplot as plt
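# Demo of two classic line-rasterization algorithms: each traces the edges of a
# fixed rectangle point-by-point and plots the points with matplotlib.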
def RectanguloDDA():
x1=2
y1=6
x2=10
y2=6
x3=2
y3=10
x4=2
y4=6
dx= abs(x2-x1)
dy= abs(y2-y1)
steps=0
color='r.'
if (dx)> (dy):
steps=(dx)
else:
steps=(dy)
xInc = float(dx / steps)
yInc = float(dy / steps)
xInc = round(xInc,1)
yInc = round(yInc,1)
dx1= abs(x3-x1)
dy1= abs(y3-y1)
steps1=0
color='r.'
if (dx1)> (dy1):
steps1=(dx1)
else:
steps1=(dy1)
xInc1 = float(dx1/ steps1)
yInc1 = float(dy1 / steps1)
xInc1 = round(xInc1,1)
yInc1 = round(yInc1,1)
for i in range(0, int(steps + 1)):
plt.plot(round(x1),round(y1),color)
plt.plot(round(x3),round(y3),color)
x1+=xInc
y1+=yInc
x3+=xInc
y3+=yInc
for i in range(0, int(steps1 +1)):
plt.plot(round(x1),round(y1),color)
plt.plot(round(x4),round(y4),color)
x4+=xInc1
y4+=yInc1
x1+=xInc1
y1+=yInc1
plt.show()
def RectanguloBresenham():
color='r.'
x1=2
y1=6
x2=10
y2=6
x3=2
y3=10
x4=10
y4=10
dx= abs(x2-x1)
dy= abs(y2-y1)
p = 2*dy - dx
x=x1
y=y1
dx1= abs(x4-x3)
dy1= abs(y4-y3)
p1 = 2*dy1 - dx1
xa=x3
ya=y3
dx2= abs(x3-x1)
dy2= abs(y3-y1)
p2= 2*dy2 - dx2
xb=x1
yb=y1
dx3= abs(x4-x2)
dy3= abs(y4-y2)
p3= 2*dy3 - dx3
xc=x2
yc=y2
for i in range(x, x2):
plt.plot(round(x),round(y), color)
if p < 0:
x= x + 1
p = p + 2* dy
else:
p = p + (2*dy) - (2*dx)
y=y+1
for i in range(xa, x4):
plt.plot(round(xa),round(ya), color)
if p1 < 0:
xa= xa+ 1
p1 = p1 + 2* dy1
else:
p1 = p1 + (2*dy1) - (2*dx1)
ya=ya+1
for i in range(yb, y3):
plt.plot(round(xb),round(yb), color)
if p2 < 0:
xb= xb + 1
p2 = p2 + 2* dy2
else:
p2 = p2 + (2*dy2) - (2*dx2)
yb = yb+1
for i in range(yc, y4+1):
plt.plot(round(xc),round(yc), color)
if p3 < 0:
xc= xc + 1
p3 = p3 + 2* dy3
else:
p3 = p3 + (2*dy3) - (2*dx3)
yc = yc+1
plt.show()
if __name__ == '__main__':
print("""
1) Algoritmo DDA
2) Algoritmo Bresenham
""")
opcion=input ("selecciona la opcion: ")
if opcion == "1":
RectanguloDDA()
else:
RectanguloBresenham()
|
[
"rosario6.mntl@gmail.com"
] |
rosario6.mntl@gmail.com
|
100259a17bad558578a1a3a506a259c32a830e29
|
c83aa035d452505f20586dddafd88c3e5f0dc622
|
/2020/PMC/PMC/PMC.py
|
f8b641d1e17df89b6a549fa6bd84b3f3ca67637f
|
[] |
no_license
|
trenticle/Projects
|
2650ec89025ca356d5e3eff81b3c3f2b12abc2aa
|
ecb4ded72118e355aa94433a1e30a6e97a3dfecd
|
refs/heads/master
| 2021-06-21T07:59:32.488780
| 2021-01-02T06:46:23
| 2021-01-02T06:46:23
| 155,902,724
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
student_grades = [9.1,8.8,7.5]
mysum = sum(student_grades)
length = len(student_grades)
mean = mysum / length
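# mean of the three grades: 25.4 / 3 ~= 8.47 (computed above but never printed)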
print(length)
max_value = max(student_grades)
print(max_value)
student_grades = [9.1, 8.8, 10.0, 7.7, 6.8, 8.0, 10.0, 8.1, 10.0, 9.9]
print(student_grades.count(10.0))
username = "Python3"
print(username.lower())
student_grades = {"Mary": 9.1, "Sim": 8.8, "John": 7.5}
print(student_grades.values())
|
[
"trenticle@LIVE.COM"
] |
trenticle@LIVE.COM
|
d99fcf4ac48e691d6dbc64be98ab7be92e67452b
|
3570c4b8dd1792d595c32de27547214b63f95169
|
/tests/test_example.py
|
c9ed82e2ebf6ff0543f1204dbf393575fc373778
|
[
"Apache-2.0"
] |
permissive
|
CS5331-GROUP-7/rest-api-development
|
ec0d484662f0cd8caa7e575cf8c22d0658bfe43e
|
4d421497d2802fe1c5441bee1ea7d55d36df354e
|
refs/heads/master
| 2021-09-14T11:29:08.671090
| 2018-05-12T13:38:55
| 2018-05-12T13:38:55
| 122,959,571
| 2
| 0
|
Apache-2.0
| 2018-03-05T05:37:34
| 2018-02-26T11:21:47
|
Python
|
UTF-8
|
Python
| false
| false
| 517
|
py
|
import pytest
import json
from flask import Flask, url_for
from src.service.views import ENDPOINT_LIST
def test_get_index(client):
page = client.get(url_for('views.index')) # can use the endpoint(method) name here
assert page.status_code == 200 # response code
data = json.loads(page.data) # response data
assert 'result' in data
assert 'status' in data
assert data['status']
assert data['result'] == ENDPOINT_LIST
if __name__ == '__main__':
pytest.main()
|
[
"tanxs93@gmail.com"
] |
tanxs93@gmail.com
|
e797642929d74abf07f38be4d559a60c4edc39c4
|
a7b07e14f58008e4c9567a9ae67429cedf00e1dc
|
/lib/jnpr/healthbot/swagger/models/rule_schema_variable.py
|
630dceaecb69d32efa58bd7ea4450d2121bdd4cb
|
[
"Apache-2.0"
] |
permissive
|
dmontagner/healthbot-py-client
|
3750d8375bc4fa7bedcdbc6f85f17fb812c19ea9
|
0952e0a9e7ed63c9fe84879f40407c3327735252
|
refs/heads/master
| 2020-08-03T12:16:38.428848
| 2019-09-30T01:57:24
| 2019-09-30T01:57:24
| 211,750,200
| 0
| 0
|
Apache-2.0
| 2019-09-30T01:17:48
| 2019-09-30T01:17:47
| null |
UTF-8
|
Python
| false
| false
| 6,447
|
py
|
# coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: healthbot-hackers@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RuleSchemaVariable(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'description': 'str',
'name': 'str',
'type': 'str',
'value': 'str'
}
attribute_map = {
'description': 'description',
'name': 'name',
'type': 'type',
'value': 'value'
}
def __init__(self, description=None, name=None, type=None, value=None): # noqa: E501
"""RuleSchemaVariable - a model defined in Swagger""" # noqa: E501
self._description = None
self._name = None
self._type = None
self._value = None
self.discriminator = None
if description is not None:
self.description = description
self.name = name
self.type = type
if value is not None:
self.value = value
@property
def description(self):
"""Gets the description of this RuleSchemaVariable. # noqa: E501
Description about the variable # noqa: E501
:return: The description of this RuleSchemaVariable. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this RuleSchemaVariable.
Description about the variable # noqa: E501
:param description: The description of this RuleSchemaVariable. # noqa: E501
:type: str
"""
self._description = description
@property
def name(self):
"""Gets the name of this RuleSchemaVariable. # noqa: E501
Variable name used in the playbook. Should be of pattern [a-zA-Z][a-zA-Z0-9_-]* # noqa: E501
:return: The name of this RuleSchemaVariable. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this RuleSchemaVariable.
Variable name used in the playbook. Should be of pattern [a-zA-Z][a-zA-Z0-9_-]* # noqa: E501
:param name: The name of this RuleSchemaVariable. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
if name is not None and len(name) > 64:
raise ValueError("Invalid value for `name`, length must be less than or equal to `64`") # noqa: E501
if name is not None and not re.search('^[a-zA-Z][a-zA-Z0-9_-]*$', name): # noqa: E501
raise ValueError("Invalid value for `name`, must be a follow pattern or equal to `/^[a-zA-Z][a-zA-Z0-9_-]*$/`") # noqa: E501
self._name = name
@property
def type(self):
"""Gets the type of this RuleSchemaVariable. # noqa: E501
Type of value supported. This information will be used by UI to display options available for the values # noqa: E501
:return: The type of this RuleSchemaVariable. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this RuleSchemaVariable.
Type of value supported. This information will be used by UI to display options available for the values # noqa: E501
:param type: The type of this RuleSchemaVariable. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
allowed_values = ["int", "float", "string", "boolean", "device-group", "device", "sensor-argument"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def value(self):
"""Gets the value of this RuleSchemaVariable. # noqa: E501
Default value for the variable # noqa: E501
:return: The value of this RuleSchemaVariable. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this RuleSchemaVariable.
Default value for the variable # noqa: E501
:param value: The value of this RuleSchemaVariable. # noqa: E501
:type: str
"""
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RuleSchemaVariable):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
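# Minimal usage sketch (hypothetical values):
# v = RuleSchemaVariable(name="threshold", type="int", value="80")
# v.to_dict() -> {'description': None, 'name': 'threshold', 'type': 'int', 'value': '80'}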
|
[
"nitinkr@juniper.net"
] |
nitinkr@juniper.net
|
e64142acb1d5ae1057cb0115cbd5acc17749fddf
|
1ad512e9023e7ed43f8f3647733718ba65c20ce8
|
/apps/organization/urls.py
|
56bdc2ed1d6bf4eeb4d760e2f33f1943817437f2
|
[] |
no_license
|
caijunrong31/MxOnline2.7
|
e660bfa0619c4ef94ce9da614388370ec4c74725
|
a60054a2938fcc0707be4c0903df1afa75e9cbb0
|
refs/heads/master
| 2020-03-10T20:18:48.010733
| 2018-04-17T14:16:06
| 2018-04-17T14:16:06
| 129,567,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,135
|
py
|
# _*_ coding: utf-8 _*_
__author__ = 'cai'
__date__ = '2017/12/3 11:18'
from django.conf.urls import url, include
from organization.views import OrgView, AddUserAskView, TeacherListView, OrgHomeView, OrgCourseView, OrgDescView, OrgTeacherView, AddFavView
from organization.views import TeacherDetailView
urlpatterns = [
# 课程机构列表页
url(r'^list/$', OrgView.as_view(), name="org_list"),
url(r'^add_ask/$', AddUserAskView.as_view(), name="add_ask"),
url(r'^home/(?P<org_id>\d+)/$', OrgHomeView.as_view(), name="org_home"),
url(r'^course/(?P<org_id>\d+)/$', OrgCourseView.as_view(), name="org_course"),
url(r'^desc/(?P<org_id>\d+)/$', OrgDescView.as_view(), name="org_desc"),
url(r'^org_teacher/(?P<org_id>\d+)/$', OrgTeacherView.as_view(), name="org_teacher"),
# 机构收藏
url(r'^add_fav/$', AddFavView.as_view(), name="add_fav"),
# 讲师列表页
url(r'^teacher/list$', TeacherListView.as_view(), name="teacher_list"),
# 讲师详情页
url(r'^teacher/detail/(?P<teacher_id>\d+)/$', TeacherDetailView.as_view(), name="teacher_detail"),
]
|
[
"13113389477@163.com"
] |
13113389477@163.com
|
c12a7e13c5304a8904fdf8469d845c43f43baafe
|
c5a8d48d902523e88e620e22930ba4dfd8578cdf
|
/api/journal/migrations/0028_invoice_status.py
|
d0785e46a7082a4cdb83eec6398572caa795544d
|
[] |
no_license
|
stuartquin/soitgoes
|
1f8cd5dbff2a33f3dd0e89025726c8ac605dbc8c
|
30f619db560607847d092ec6a897484fea5f4db5
|
refs/heads/master
| 2023-05-31T15:49:52.484803
| 2023-05-19T08:45:10
| 2023-05-19T08:45:10
| 49,604,474
| 3
| 0
| null | 2020-03-23T11:51:00
| 2016-01-13T21:52:46
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 537
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-03-25 16:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('journal', '0027_auto_20170325_1056'),
]
operations = [
migrations.AddField(
model_name='invoice',
name='status',
field=models.CharField(choices=[('DRAFT', 'DRAFT'), ('ISSUED', 'ISSUED'), ('PAID', 'PAID')], default='DRAFT', max_length=128),
),
]
|
[
"stuart.quin@gmail.com"
] |
stuart.quin@gmail.com
|
f0435c5ea05ff27e76eb5d6133f40a45dacdc85a
|
369838e937e11fecc4e97362f83e5f588961b614
|
/poly_gen_anaylisis.py
|
290c40e576a9c838daa48bcca650eb8d9aa5a220
|
[] |
no_license
|
abhishek-pes/CCBD-Polygon-Generator
|
6a8559b1c7128c0dee272abaf0401ede80e0a48b
|
beb3249fb379994c72229374588dbfbb4661a2e9
|
refs/heads/main
| 2023-05-08T22:55:14.811871
| 2021-05-25T16:03:54
| 2021-05-25T16:03:54
| 367,890,885
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,462
|
py
|
'''
Currently this program works well when generating around 50-70 polygons, each with
10-500 vertices. As the number of vertices per polygon increases, the shapes become
more star-like. The resulting WKT file can be plotted with the QGIS software (open source).
'''
from random import sample,choice
from numpy import arctan2,random,sin,cos,degrees
import math
import time
from datetime import timedelta
import os
import matplotlib.pyplot as plt
grid = {"x":[],"y":[]}
f = open("demo.wkt","w")
'''
The number of polygons can exceed 20, but it is limited to 4-20 here for testing.
'''
n = int(input("Enter the number of polygons to generate (4 - 20): "))
begin = time.monotonic()
def generate_random_polygons(n):
for i in range(n):
        '''
        The number of vertices can be 10-500, but it is limited to 10-50 here for testing.
        '''
number_of_vertices = random.randint(10,50) #number of vertices in each polygon can be from 10-500
if(i == 0):
x_boundary,y_boundary = 1,500
else:
x_boundary = x_boundary+200 #shifting the x and y axis so that they do not plot over each other.
y_boundary = y_boundary + 210
get_polygons(number_of_vertices,x_boundary,y_boundary,i)
#function to get center of mass
def get_com(grid):
center = [sum(grid["x"])/len(grid["x"]) , sum(grid["y"])/len(grid["y"])]
return center
# function to get the polar angle and squared distance of a point from the center
def get_sqr_polar(point,com):
return [math.atan2(point[1] - com[1] , point[0]-com[0]) , (point[0]-com[0])**2 + (point[1] - com[1])**2]
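# Example: with com = [0, 0], get_sqr_polar([3, 4], com) returns
# [atan2(4, 3) ~= 0.927 rad, 25] -- the polar angle and squared distance.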
def get_polygons(n,x_boundary,y_boundary,i):
x = []
y = []
    x_height = 1
    y_height = 500
    if i % 2 == 0:
        # Shift the coordinates so the polygons spread out instead of cluttering in one place.
        x_height = x_height + 100
        y_height = y_height + 140
    if i % 3 == 0:
        x_height = x_height - 140
        y_height = y_height - 210
    x, y = sample(range(x_boundary, y_boundary), n), sample(range(x_height, y_height), n)
grid["x"] = x
grid["y"] = y
com = get_com(grid)
final_l = []
for i in range(len(grid["x"])):
point = [grid["x"][i],grid["y"][i]]
pd = get_sqr_polar(point,com)
final_l.append(pd)
    '''
    The zipping and sorting below order the vertices by (polar angle, squared distance)
    around the center of mass and then reverse, so the points trace out a star-shaped polygon.
    '''
zipped_x = zip(final_l,x)
sorted_x = sorted(zipped_x)
sorted_list1 = [element for _, element in sorted_x]
sorted_list1 = sorted_list1[::-1]
zipped_y = zip(final_l,y)
sorted_y = sorted(zipped_y)
sorted_list2 = [element for _, element in sorted_y]
sorted_list2 = sorted_list2[::-1]
#function to write the generated points in the wkt file
write_to_file(sorted_list1,sorted_list2)
# function to store the points in WKT format (may later be changed to .csv)
def write_to_file(sorted_list1,sorted_list2):
f.write("POLYGON ((")
for i in range(len(sorted_list1)):
f.write(str(sorted_list1[i])+" "+str(sorted_list2[i])+",")
f.write(str(sorted_list1[0])+" "+str(sorted_list2[0])+"))\n")
generate_random_polygons(n)
end = time.monotonic()
print("Time = ",timedelta(seconds=end-begin))
f.close()
file_size = os.path.getsize('demo.wkt')
print("File size = ", file_size, "bytes")
|
[
"abhishekmis40@gmail.com"
] |
abhishekmis40@gmail.com
|
94a1445b5d73052a0e9fbc2caed1e94ae674a0da
|
4f2b9848ee1cf41017b424c7367a240f93625e86
|
/doc/tutorial/config.py
|
cdb60e8b8cc1f8f18664a9d5edb55b488c038574
|
[
"Apache-2.0"
] |
permissive
|
martin-dostal-eli/python-icat
|
f5cc0e497376d7264db1af2bb9ad588e29a9bd7b
|
8c882a3095f2dd7276a7c0edba44dc9b3ef4eedd
|
refs/heads/master
| 2023-08-18T02:12:30.267009
| 2021-07-20T11:24:25
| 2021-07-20T11:24:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
#! /usr/bin/python
from __future__ import print_function
import icat
import icat.config
config = icat.config.Config(needlogin=False, ids=False)
client, conf = config.getconfig()
print("Connect to %s\nICAT version %s" % (conf.url, client.apiversion))
|
[
"rolf.krahl@helmholtz-berlin.de"
] |
rolf.krahl@helmholtz-berlin.de
|
c17852429364551ea37ca25d24443315fc54f521
|
89aced396640fed8960a05ad528c55ec6733da25
|
/handler/file_handler.py
|
927f9ed59e428988f4cfa06e806565deedfa966c
|
[] |
no_license
|
jubileus/tornado
|
33c439775ab12fb42235667f3027d98319a14325
|
94a8d65908a23ab6c4c653c7c54473a5ec7e081a
|
refs/heads/master
| 2021-01-10T09:28:40.084949
| 2015-11-02T08:05:24
| 2015-11-02T08:05:24
| 45,165,814
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,423
|
py
|
# coding=UTF-8
import os
from handler.base_handler import BaseHandler
__author__ = 'jubileus'
file_path = '/home/jubileus/'
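# Assumes a writable 'files' subdirectory under file_path for uploads/downloads;
# adjust the path for your host.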
class FileHandler(BaseHandler):
def data_received(self, chunk):
pass
def get(self):
self.render('file.html')
class UploadFileHandler(BaseHandler):
def data_received(self, chunk):
pass
def post(self):
# upload_path = os.path.join(os.path.dirname(__file__), 'files')
upload_path = os.path.join(file_path, 'files')
        # Extract metadata for files uploaded under the form field named 'file'
file_metas = self.request.files['file']
for meta in file_metas:
filename = meta['filename']
filepath = os.path.join(upload_path, filename)
            # Some files need to be stored in binary mode; adjust as needed in practice
with open(filepath, 'wb') as up:
up.write(meta['body'])
self.write('''
<html>
<head><title>Upload File</title></head>
<body>
        <h3>Upload successful</h3>
        <a href="/file">Back</a>
</body>
</html>
''')
class DownloadFileHandler(BaseHandler):
def data_received(self, chunk):
pass
def post(self):
filename = self.get_argument('filename')
if filename:
try:
                print('DownloadFileHandler: serving file', filename)
download_path = os.path.join(file_path, 'files', filename)
                # The read mode may need adjusting depending on the file type
exist = False
with open(download_path, 'rb') as f:
                    # Content-Type is hard-coded here; it could instead be derived from the actual file
self.set_header('Content-Type', 'application/octet-stream')
self.set_header('Content-Disposition', 'attachment; filename=' + filename)
exist = True
while True:
data = f.read(1024)
if not data:
break
self.write(data)
                # Remember to call finish()
if exist:
self.finish()
else:
self.write('''
<html>
<head><title>Download File</title></head>
<body>
                <h3>File not found</h3>
                <a href="/file">Back</a>
</body>
</html>
''')
except Exception:
self.write('''
<html>
<head><title>Download File</title></head>
<body>
                <h3>File not found</h3>
                <a href="/file">Back</a>
</body>
</html>
''')
else:
self.write('''
<html>
<head><title>Download File</title></head>
<body>
                <h3>File not found</h3>
                <a href="/file">Back</a>
</body>
</html>
''')
|
[
"1509483682@qq.com"
] |
1509483682@qq.com
|
78ae44a87cf973b283dc8f40c51244cc070b8456
|
e8500b4a09882ee34af21bee4f522546f07ff7e9
|
/src/michelson_kernel/docs.py
|
a0df3b3b380628618798ca4674e8446d2db70d9b
|
[
"MIT"
] |
permissive
|
kaellis/pytezos
|
71a8fa29f5d48fed1e62698fa65c32c162e440ef
|
ab88bb4138094fbe7fe6852f5a908423bf9e9d60
|
refs/heads/master
| 2023-08-31T19:03:19.877244
| 2021-11-02T03:40:00
| 2021-11-02T03:40:00
| 423,692,515
| 0
| 0
|
MIT
| 2021-11-02T03:40:01
| 2021-11-02T03:16:51
| null |
UTF-8
|
Python
| false
| false
| 16,985
|
py
|
docs = {
'ABS': 'ABS\nABS :: int : A => nat : A\nObtain the absolute value of an integer',
'ADD': 'ADD\n'
'ADD :: nat : nat : A => nat : A\n'
'ADD :: nat : int : A => int : A\n'
'ADD :: int : nat : A => int : A\n'
'ADD :: int : int : A => int : A\n'
'ADD :: timestamp : int : A => timestamp : A\n'
'ADD :: int : timestamp : A => timestamp : A\n'
'ADD :: mutez : mutez : A => mutez : A\n'
'ADD :: bls12_381_g1 : bls12_381_g1 : A => bls12_381_g1 : A\n'
'ADD :: bls12_381_g2 : bls12_381_g2 : A => bls12_381_g2 : A\n'
'ADD :: bls12_381_fr : bls12_381_fr : A => bls12_381_fr : A\n'
'Add two numerical values',
'ADDRESS': 'ADDRESS\nADDRESS :: contract ty1 : A => address : A\nPush the address of a contract',
'AMOUNT': 'AMOUNT\nAMOUNT :: A => mutez : A\nPush the amount of the current transaction',
'AND': 'AND\n'
'AND :: bool : bool : A => bool : A\n'
'AND :: nat : nat : A => nat : A\n'
'AND :: int : nat : A => nat : A\n'
'Boolean and bitwise AND',
'APPLY': 'APPLY\n'
'APPLY :: ty1 : lambda ( pair ty1 ty2 ) ty3 : A => lambda ty2 ty3 : A\n'
'Partially apply a tuplified function from the stack',
'BALANCE': 'BALANCE\nBALANCE :: A => mutez : A\nPush the current amount of mutez of the executing contract',
'BLAKE2B': 'BLAKE2B\nBLAKE2B :: bytes : A => bytes : A\nCompute a Blake2B cryptographic hash',
'CAR': 'CAR\nCAR :: pair ty1 ty2 : A => ty1 : A\nAccess the left part of a pair',
'CAST': '',
'CDR': 'CDR\nCDR :: pair ty1 ty2 : A => ty2 : A\nAccess the right part of a pair',
'CHAIN_ID': 'CHAIN_ID\nCHAIN_ID :: A => chain_id : A\nPush the chain identifier',
'CHECK_SIGNATURE': 'CHECK_SIGNATURE\n'
'CHECK_SIGNATURE :: key : signature : bytes : A => bool : A\n'
'Verify signature of bytes by key',
'COMPARE': 'COMPARE\nCOMPARE :: cty : cty : A => int : A\nCompare two values',
'CONCAT': 'CONCAT\n'
'CONCAT :: string : string : A => string : A\n'
'CONCAT :: list string : A => string : A\n'
'CONCAT :: bytes : bytes : A => bytes : A\n'
'CONCAT :: list bytes : A => bytes : A\n'
'Concatenate a string, byte sequence, string list or byte sequence list',
'CONS': 'CONS\nCONS :: ty1 : list ty1 : A => list ty1 : A\nPrepend an element to a list',
'CONTRACT': 'CONTRACT ty\n' 'CONTRACT ty :: address : A => option ( contract ty ) : A\n' 'Cast an address to a typed contract',
'CREATE_ACCOUNT': '\nPush an account creation operation',
'CREATE_CONTRACT': 'CREATE_CONTRACT { parameter ty1 ; storage ty2 ; code instr1 }\n'
'CREATE_CONTRACT { parameter ty1 ; storage ty2 ; code instr1 } :: option key_hash : mutez : ty2 '
': A => operation : address : A\n'
'Push a contract creation operation',
'DIG': 'DIG n\nDIG n :: A @ ( ty1 : B ) => ty1 : ( A @ B )\nRetrieve the n\\ th element of the stack',
'DIP': 'DIP instr\nDIP instr :: ty : B => ty : C\nRun code protecting the top of the stack',
'DIPN': 'DIP n instr\nDIP n instr :: A @ B => A @ C\nRun code protecting the n topmost elements of the stack',
'DROP': 'DROP n\nDROP n :: A @ B => B\nDrop the top n elements of the stack',
'DUG': 'DUG n\nDUG n :: ty1 : ( A @ B ) => A @ ( ty1 : B )\nInsert the top element at depth n',
'DUP': 'DUP\nDUP :: ty1 : A => ty1 : ty1 : A\nDuplicate the top of the stack',
'DUPN': 'DUP n\nDUP n :: A @ ty1 : B => ty1 : A @ ty1 : B\nDuplicate the n\\ th element of the stack',
'EDIV': 'EDIV\n'
'EDIV :: nat : nat : A => option ( pair nat nat ) : A\n'
'EDIV :: nat : int : A => option ( pair int nat ) : A\n'
'EDIV :: int : nat : A => option ( pair int nat ) : A\n'
'EDIV :: int : int : A => option ( pair int nat ) : A\n'
'EDIV :: mutez : nat : A => option ( pair mutez mutez ) : A\n'
'EDIV :: mutez : mutez : A => option ( pair nat mutez ) : A\n'
'Euclidean division',
'EMPTY_BIG_MAP': 'EMPTY_BIG_MAP kty vty\n'
'EMPTY_BIG_MAP kty vty :: A => big_map kty vty : A\n'
'Build a new, empty big_map from kty to vty',
'EMPTY_MAP': 'EMPTY_MAP kty vty\nEMPTY_MAP kty vty :: A => map kty vty : A\nBuild a new, empty map from kty to vty',
'EMPTY_SET': 'EMPTY_SET cty\nEMPTY_SET cty :: A => set cty : A\nBuild a new, empty set for elements of type cty',
'EQ': 'EQ\nEQ :: int : A => bool : A\nCheck that the top of the stack equals zero',
'EXEC': 'EXEC\nEXEC :: ty1 : lambda ty1 ty2 : A => ty2 : A\nExecute a function from the stack',
'EXPAND': '',
'FAILWITH': 'FAILWITH\nFAILWITH :: ty1 : A => B\nExplicitly abort the current program',
'GE': 'GE\nGE :: int : A => bool : A\nCheck that the top of the stack is greater than or equal to zero',
'GET': 'GET\n'
'GET :: kty : map kty vty : A => option vty : A\n'
'GET :: kty : big_map kty vty : A => option vty : A\n'
'Access an element in a map or big_map',
'GETN': 'GET n\n'
'GET ( 0 ) :: ty : A => ty : A\n'
"GET ( 2 * n ) :: pair ty0 .. tyN ty' : A => ty' : A\n"
"GET ( 2 * n + 1 ) :: pair ty0 .. tyN ty' ty'' : A => ty' : A\n"
'Access an element or a sub comb in a right comb',
'GET_AND_UPDATE': 'GET_AND_UPDATE\n'
'GET_AND_UPDATE :: kty : option vty : map kty vty : A => option vty : map kty vty : A\n'
'GET_AND_UPDATE :: kty : option vty : big_map kty vty : A => option vty : big_map kty vty : A\n'
'A combination of the GET and UPDATE instructions',
'GT': 'GT\nGT :: int : A => bool : A\nCheck that the top of the stack is greater than zero',
'HASH_KEY': 'HASH_KEY\nHASH_KEY :: key : A => key_hash : A\nCompute the Base58Check of a public key',
'IF': 'IF instr1 instr2\nIF instr1 instr2 :: bool : A => B\nConditional branching',
'IF_CONS': 'IF_CONS instr1 instr2\nIF_CONS instr1 instr2 :: list ty : A => B\nInspect a list',
'IF_LEFT': 'IF_LEFT instr1 instr2\nIF_LEFT instr1 instr2 :: or ty1 ty2 : A => B\nInspect a value of a union',
'IF_NONE': 'IF_NONE instr1 instr2\nIF_NONE instr1 instr2 :: option ty1 : A => B\nInspect an optional value',
'IMPLICIT_ACCOUNT': 'IMPLICIT_ACCOUNT\n' 'IMPLICIT_ACCOUNT :: key_hash : A => contract unit : A\n' 'Create an implicit account',
'INT': 'INT\n'
'INT :: nat : A => int : A\n'
'INT :: bls12_381_fr : A => int : A\n'
'Convert a natural number or a BLS12-381 field element to an integer',
'ISNAT': 'ISNAT\nISNAT :: int : A => option nat : A\nConvert a non-negative integer to a natural number',
'ITER': 'ITER instr\n'
'ITER instr :: list ty : A => A\n'
'ITER instr :: set cty : A => A\n'
'ITER instr :: map kty vty : A => A\n'
'Iterate over a set, list or map',
'JOIN_TICKETS': 'JOIN_TICKETS\n'
'JOIN_TICKETS :: pair ( ticket cty ) ( ticket cty ) : A => option ( ticket cty ) : A\n'
'Join two tickets into one',
'KECCAK': 'KECCAK\nKECCAK :: bytes : A => bytes : A\nCompute a Keccak-256 cryptographic hash',
'LAMBDA': 'LAMBDA ty1 ty2 instr\nLAMBDA ty1 ty2 instr :: A => lambda ty1 ty2 : A\nPush a lambda onto the stack',
'LE': 'LE\nLE :: int : A => bool : A\nCheck that the top of the stack is less than or equal to zero',
'LEFT': 'LEFT ty2\nLEFT ty2 :: ty1 : A => or ty1 ty2 : A\nWrap a value in a union (left case)',
'LEVEL': 'LEVEL\nLEVEL :: A => nat : A\nPush the current block level',
'LOOP': 'LOOP instr\nLOOP instr :: bool : A => A\nA generic loop',
'LOOP_LEFT': 'LOOP_LEFT instr\nLOOP_LEFT instr :: or ty1 ty2 : A => ty2 : A\nLoop with accumulator',
'LSL': 'LSL\nLSL :: nat : nat : A => nat : A\nLogically left shift a natural number',
'LSR': 'LSR\nLSR :: nat : nat : A => nat : A\nLogically right shift a natural number',
'LT': 'LT\nLT :: int : A => bool : A\nCheck that the top of the stack is less than zero',
'MAP': 'MAP instr\n'
'MAP instr :: list ty : A => list ty2 : A\n'
'MAP instr :: map kty ty1 : A => map kty ty2 : A\n'
'Apply instr to each element of a list or map.',
'MEM': 'MEM\n'
'MEM :: cty : set cty : A => bool : A\n'
'MEM :: kty : map kty vty : A => bool : A\n'
'MEM :: kty : big_map kty vty : A => bool : A\n'
'Check for the presence of a binding for a key in a map, set or big_map',
'MUL': 'MUL\n'
'MUL :: nat : nat : A => nat : A\n'
'MUL :: nat : int : A => int : A\n'
'MUL :: int : nat : A => int : A\n'
'MUL :: int : int : A => int : A\n'
'MUL :: mutez : nat : A => mutez : A\n'
'MUL :: nat : mutez : A => mutez : A\n'
'MUL :: bls12_381_g1 : bls12_381_fr : A => bls12_381_g1 : A\n'
'MUL :: bls12_381_g2 : bls12_381_fr : A => bls12_381_g2 : A\n'
'MUL :: bls12_381_fr : bls12_381_fr : A => bls12_381_fr : A\n'
'MUL :: nat : bls12_381_fr : A => bls12_381_fr : A\n'
'MUL :: int : bls12_381_fr : A => bls12_381_fr : A\n'
'MUL :: bls12_381_fr : nat : A => bls12_381_fr : A\n'
'MUL :: bls12_381_fr : int : A => bls12_381_fr : A\n'
'Multiply two numerical values',
'NEG': 'NEG\n'
'NEG :: nat : A => int : A\n'
'NEG :: int : A => int : A\n'
'NEG :: bls12_381_g1 : A => bls12_381_g1 : A\n'
'NEG :: bls12_381_g2 : A => bls12_381_g2 : A\n'
'NEG :: bls12_381_fr : A => bls12_381_fr : A\n'
'Negate a numerical value',
'NEQ': 'NEQ\nNEQ :: int : A => bool : A\nCheck that the top of the stack is not equal to zero',
'NEVER': 'NEVER\nNEVER :: never : A => B\nClose an absurd branch',
'NIL': 'NIL ty\nNIL ty :: A => list ty : A\nPush an empty list',
'NONE': 'NONE ty\nNONE ty :: A => option ty : A\nPush the absent optional value',
'NOOP': '{}\n{} :: A => A\nEmpty instruction sequence',
'NOT': 'NOT\n'
'NOT :: bool : A => bool : A\n'
'NOT :: nat : A => int : A\n'
'NOT :: int : A => int : A\n'
'Boolean negation and bitwise complement',
'NOW': 'NOW\nNOW :: A => timestamp : A\nPush block timestamp',
'OR': 'OR\nOR :: bool : bool : A => bool : A\nOR :: nat : nat : A => nat : A\nBoolean and bitwise OR',
'PACK': 'PACK\nPACK :: ty : A => bytes : A\nSerialize data',
'PAIR': "PAIR\nPAIR :: ty1 : ty2 : A => pair ty1 ty2 : A\nBuild a pair from the stack's top two elements",
'PAIRING_CHECK': 'PAIRING_CHECK\n'
'PAIRING_CHECK :: list ( pair bls12_381_g1 bls12_381_g2 ) : A => bool : A\n'
'Check a BLS12-381 pairing',
'PAIRN': 'PAIR n\n'
'PAIR n :: ty1 : .... : tyN : A => pair ty1 .... tyN : A\n'
'Fold n values on the top of the stack into a right comb',
'PUSH': 'PUSH ty x\nPUSH ty x :: A => ty1 : A\nPush a constant value of a given type onto the stack',
'READ_TICKET': 'READ_TICKET\n'
'READ_TICKET :: ticket cty : A => pair address cty nat : ticket cty : A\n'
'Retrieve the information stored in a ticket. Also return the ticket.',
'RENAME': '',
'RIGHT': 'RIGHT ty1\nRIGHT ty1 :: ty2 : A => or ty1 ty2 : A\nWrap a value in a union (right case)',
'SAPLING_EMPTY_STATE': 'SAPLING_EMPTY_STATE ms\n'
'SAPLING_EMPTY_STATE ms :: A => sapling_state ms : A\n'
'Pushes an empty Sapling state on the stack',
'SAPLING_VERIFY_UPDATE': 'SAPLING_VERIFY_UPDATE\n'
'SAPLING_VERIFY_UPDATE :: sapling_transaction ms : sapling_state ms : A => option ( pair '
'int ( sapling_state ms ) ) : A\n'
'Verify and apply a transaction on a Sapling state',
'SELF': 'SELF\nSELF :: A => contract ty : A\nPush the current contract',
'SELF_ADDRESS': 'SELF_ADDRESS\nSELF_ADDRESS :: A => address : A\nPush the address of the current contract',
'SENDER': 'SENDER\nSENDER :: A => address : A\nPush the contract that initiated the current internal transaction',
'SEQ': 'instr1 ; instr2\ninstr1 ; instr2 :: A => C\nInstruction sequence',
'SET_DELEGATE': 'SET_DELEGATE\nSET_DELEGATE :: option key_hash : A => operation : A\nPush a delegation operation',
'SHA256': 'SHA256\nSHA256 :: bytes : A => bytes : A\nCompute a SHA-256 cryptographic hash',
'SHA3': 'SHA3\nSHA3 :: bytes : A => bytes : A\nCompute a SHA3-256 cryptographic hash',
'SHA512': 'SHA512\nSHA512 :: bytes : A => bytes : A\nCompute a SHA-512 cryptographic hash',
'SIZE': 'SIZE\n'
'SIZE :: set cty : A => nat : A\n'
'SIZE :: map kty vty : A => nat : A\n'
'SIZE :: list ty : A => nat : A\n'
'SIZE :: string : A => nat : A\n'
'SIZE :: bytes : A => nat : A\n'
'Obtain size of a string, list, set, map or byte sequence bytes',
'SLICE': 'SLICE\n'
'SLICE :: nat : nat : string : A => option string : A\n'
'SLICE :: nat : nat : bytes : A => option bytes : A\n'
'Obtain a substring or subsequence of a string respectively byte sequence bytes',
'SOME': 'SOME\nSOME :: ty1 : A => option ty1 : A\nWrap an existing optional value',
'SOURCE': 'SOURCE\nSOURCE :: A => address : A\nPush the contract that initiated the current transaction',
'SPLIT_TICKET': 'SPLIT_TICKET\n'
'SPLIT_TICKET :: ticket cty : pair nat nat : A => option ( pair ( ticket cty ) ( ticket cty ) ) : '
'A\n'
'Split a ticket in two',
'STEPS_TO_QUOTA': '\nPush the remaining steps before the contract execution must terminate',
'SUB': 'SUB\n'
'SUB :: nat : nat : A => int : A\n'
'SUB :: nat : int : A => int : A\n'
'SUB :: int : nat : A => int : A\n'
'SUB :: int : int : A => int : A\n'
'SUB :: timestamp : int : A => timestamp : A\n'
'SUB :: timestamp : timestamp : A => int : A\n'
'SUB :: mutez : mutez : A => mutez : A\n'
'Subtract two numerical values',
'SWAP': 'SWAP\nSWAP :: ty1 : ty2 : A => ty2 : ty1 : A\nSwap the top two elements of the stack',
'TICKET': 'TICKET\nTICKET :: cty : nat : A => ticket cty : A\nCreate a ticket',
'TOP': '',
'TOTAL_VOTING_POWER': 'TOTAL_VOTING_POWER\n' 'TOTAL_VOTING_POWER :: A => nat : A\n' 'Return the total voting power of all contracts',
'TRANSFER_TOKENS': 'TRANSFER_TOKENS\n'
'TRANSFER_TOKENS :: ty : mutez : contract ty : A => operation : A\n'
'Push a transaction operation',
'UNIT': 'UNIT\nUNIT :: A => unit : A\nPush the unit value onto the stack',
'UNPACK': 'UNPACK ty\nUNPACK ty :: bytes : A => option ty : A\nDeserialize data, if valid',
'UNPAIR': 'UNPAIR\nUNPAIR :: pair ty1 ty2 : A => ty1 : ty2 : A\nSplit a pair into its components',
'UNPAIRN': 'UNPAIR n\n'
'UNPAIR n :: pair ty1 .... tyN : A => ty1 : .... : tyN : A\n'
'Unfold n values from a right comb on the top of the stack',
'UPDATE': 'UPDATE\n'
'UPDATE :: cty : bool : set cty : A => set cty : A\n'
'UPDATE :: kty : option vty : map kty vty : A => map kty vty : A\n'
'UPDATE :: kty : option vty : big_map kty vty : A => big_map kty vty : A\n'
'Add or remove an element in a map, big_map or set',
'UPDATEN': 'UPDATE n\n'
'UPDATE 0 :: ty1 : ty2 : A => ty1 : A\n'
"UPDATE ( 2 * n ) :: ty' : pair ty0 .. tyN ty'' : A => pair ty0 .. tyN ty' : A\n"
"UPDATE ( 2 * n + 1 ) :: ty' : pair ty0 .. tyN ty'' ty''' : A => pair ty0 .. tyN ty' ty''' : A\n"
'Update an element or a sub comb in a right comb',
'VOTING_POWER': 'VOTING_POWER\n' 'VOTING_POWER :: key_hash : A => nat : A\n' 'Return the voting power of a given contract',
'XOR': 'XOR\nXOR :: bool : bool : A => bool : A\nXOR :: nat : nat : A => nat : A\nBoolean and bitwise eXclusive OR',
'address': 'address\nAddress of an untyped contract',
'big_map': 'big_map kty vty\nA lazily deserialized map from kty to vty',
'bls12_381_fr': 'bls12_381_fr\nAn element of the BLS12-381 scalar field F\\ :sub:r',
'bls12_381_g1': 'bls12_381_g1\nA point on the BLS12-381 curve G\\ :sub:1',
'bls12_381_g2': 'bls12_381_g2\nA point on the BLS12-381 curve G\\ :sub:2',
'bool': 'bool\nA boolean',
'bytes': 'bytes\nA sequence of bytes',
'chain_id': 'chain_id\nA chain identifier',
'contract': "contract type\nAddress of a contract, where type is the contract's parameter type",
'int': 'int\nAn arbitrary-precision integer',
'key': 'key\nA public cryptographic key',
'key_hash': 'key_hash\nA hash of a public cryptographic key',
'lambda': 'lambda ty1 ty2\nA lambda with given parameter and return types',
'list': 'list type\nA single, immutable, homogeneous linked list',
'map': 'map kty vty\nAn immutable map from kty to vty',
'mutez': 'mutez\nA specific type for manipulating tokens',
'nat': 'nat\nAn arbitrary-precision natural number',
'never': 'never\nThe empty type',
'operation': 'operation\nAn internal operation emitted by a contract',
'option': 'option ty\nAn optional value',
'or': 'or ty1 ty2\nA union of two types',
'pair': 'pair ty1 ty2\nA pair or tuple of values',
'sapling_state': 'sapling_state n\nA Sapling state',
'sapling_transaction': 'sapling_transaction n\nA Sapling transaction',
'set': 'set cty\nAn immutable set of comparable values of type cty',
'signature': 'signature\nA cryptographic signature',
'string': 'string\nA string of characters',
'ticket': 'ticket cty\nA ticket used to authenticate information of type cty',
'timestamp': 'timestamp\nA real-world date.',
'unit': 'unit\nThe type whose only value is Unit',
}
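# Example lookup: docs['DUP'] yields the instruction signature and a one-line
# description ("DUP :: ty1 : A => ty1 : ty1 : A -- Duplicate the top of the stack").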
|
[
"noreply@github.com"
] |
kaellis.noreply@github.com
|
964881a63aa70e592bbbea9c87df3c6f601420fc
|
afe57349119d3213a825999f26271c76df3ec871
|
/aplib.py
|
91067e8cdcc06c4cd8cc518271a9ff159243f8ae
|
[] |
no_license
|
ActorExpose/aplib-ripper
|
211af16de530404982975a1fb276de0bbbb08f9d
|
11383396bec977d352574266999939bd000227bd
|
refs/heads/master
| 2022-02-05T03:01:32.977564
| 2019-05-21T02:39:25
| 2019-05-21T02:39:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,869
|
py
|
# this is a standalone single-file merge of aplib compression and decompression
# taken from my own library Kabopan http://code.google.com/p/kabopan/
# (no other clean-up or improvement)
# Ange Albertini, BSD Licence, 2007-2011
# from kbp\comp\_lz77.py ##################################################
def find_longest_match(s, sub):
"""returns the number of byte to look backward and the length of byte to copy)"""
if sub == "":
return 0, 0
limit = len(s)
dic = s[:]
l = 0
offset = 0
length = 0
first = 0
word = ""
word += sub[l]
pos = dic.rfind(word, 0, limit + 1)
if pos == -1:
return offset, length
offset = limit - pos
length = len(word)
dic += sub[l]
while l < len(sub) - 1:
l += 1
word += sub[l]
pos = dic.rfind(word, 0, limit + 1)
if pos == -1:
return offset, length
offset = limit - pos
length = len(word)
dic += sub[l]
return offset, length
# from _misc.py ###############################
def int2lebin(value, size):
"""ouputs value in binary, as little-endian"""
result = ""
for i in xrange(size):
result = result + chr((value >> (8 * i)) & 0xFF )
return result
def modifystring(s, sub, offset):
"""overwrites 'sub' at 'offset' of 's'"""
return s[:offset] + sub + s[offset + len(sub):]
def getbinlen(value):
"""return the bit length of an integer"""
result = 0
if value == 0:
return 1
while value != 0:
value >>= 1
result += 1
return result
# from kbp\_bits.py #################################
class _bits_compress():
"""bit machine for variable-sized auto-reloading tag compression"""
def __init__(self, tagsize):
"""tagsize is the number of bytes that takes the tag"""
self.out = ""
self.__tagsize = tagsize
self.__tag = 0
self.__tagoffset = -1
self.__maxbit = (self.__tagsize * 8) - 1
self.__curbit = 0
self.__isfirsttag = True
def getdata(self):
"""builds an output string of what's currently compressed:
currently output bit + current tag content"""
tagstr = int2lebin(self.__tag, self.__tagsize)
return modifystring(self.out, tagstr, self.__tagoffset)
def write_bit(self, value):
"""writes a bit, make space for the tag if necessary"""
if self.__curbit != 0:
self.__curbit -= 1
else:
if self.__isfirsttag:
self.__isfirsttag = False
else:
self.out = self.getdata()
self.__tagoffset = len(self.out)
self.out += "".join(["\x00"] * self.__tagsize)
self.__curbit = self.__maxbit
self.__tag = 0
if value:
self.__tag |= (1 << self.__curbit)
return
def write_bitstring(self, s):
"""write a string of bits"""
for c in s:
self.write_bit(0 if c == "0" else 1)
return
def write_byte(self, b):
"""writes a char or a number"""
assert len(b) == 1 if isinstance(b, str) else 0 <= b <= 255
self.out += b[0:1] if isinstance(b, str) else chr(b)
return
def write_fixednumber(self, value, nbbit):
"""write a value on a fixed range of bits"""
for i in xrange(nbbit - 1, -1, -1):
self.write_bit( (value >> i) & 1)
return
def write_variablenumber(self, value):
assert value >= 2
length = getbinlen(value) - 2 # the highest bit is 1
self.write_bit(value & (1 << length))
for i in xrange(length - 1, -1, -1):
self.write_bit(1)
self.write_bit(value & (1 << i))
self.write_bit(0)
return
class _bits_decompress():
"""bit machine for variable-sized auto-reloading tag decompression"""
def __init__(self, data, tagsize):
self.__curbit = 0
self.__offset = 0
self.__tag = None
self.__tagsize = tagsize
self.__in = data
self.out = ""
def getoffset(self):
"""return the current byte offset"""
return self.__offset
# def getdata(self):
# return self.__lzdata
def read_bit(self):
"""read next bit from the stream, reloads the tag if necessary"""
if self.__curbit != 0:
self.__curbit -= 1
else:
self.__curbit = (self.__tagsize * 8) - 1
self.__tag = ord(self.read_byte())
for i in xrange(self.__tagsize - 1):
self.__tag += ord(self.read_byte()) << (8 * (i + 1))
bit = (self.__tag >> ((self.__tagsize * 8) - 1)) & 0x01
self.__tag <<= 1
return bit
def is_end(self):
return self.__offset == len(self.__in) and self.__curbit == 1
    def read_byte(self):
        """read next byte from the stream (a str, or a Python 2 file object)"""
if type(self.__in) == str:
result = self.__in[self.__offset]
elif type(self.__in) == file:
result = self.__in.read(1)
self.__offset += 1
return result
def read_fixednumber(self, nbbit, init=0):
"""reads a fixed bit-length number"""
result = init
for i in xrange(nbbit):
result = (result << 1) + self.read_bit()
return result
def read_variablenumber(self):
"""return a variable bit-length number x, x >= 2
reads a bit until the next bit in the pair is not set"""
result = 1
result = (result << 1) + self.read_bit()
while self.read_bit():
result = (result << 1) + self.read_bit()
return result
def read_setbits(self, max_, set_=1):
"""read bits as long as their set or a maximum is reached"""
result = 0
while result < max_ and self.read_bit() == set_:
result += 1
return result
def back_copy(self, offset, length=1):
for i in xrange(length):
self.out += self.out[-offset]
return
def read_literal(self, value=None):
if value is None:
self.out += self.read_byte()
else:
self.out += value
return False
# from kbp\comp\aplib.py ###################################################
"""
aPLib, LZSS based lossless compression algorithm
Jorgen Ibsen U{http://www.ibsensoftware.com}
"""
def lengthdelta(offset):
if offset < 0x80 or 0x7D00 <= offset:
return 2
elif 0x500 <= offset:
return 1
return 0
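# Examples: lengthdelta(0x40) == 2, lengthdelta(0x600) == 1, lengthdelta(0x100) == 0;
# the match length stored in the stream is adjusted by this delta per offset range.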
class compress(_bits_compress):
"""
aplib compression is based on lz77
"""
def __init__(self, data, length=None):
_bits_compress.__init__(self, 1)
self.__in = data
self.__length = length if length is not None else len(data)
self.__offset = 0
self.__lastoffset = 0
self.__pair = True
return
def __literal(self, marker=True):
if marker:
self.write_bit(0)
self.write_byte(self.__in[self.__offset])
self.__offset += 1
self.__pair = True
return
def __block(self, offset, length):
assert offset >= 2
self.write_bitstring("10")
# if the last operations were literal or single byte
# and the offset is unchanged since the last block copy
# we can just store a 'null' offset and the length
if self.__pair and self.__lastoffset == offset:
self.write_variablenumber(2) # 2-
self.write_variablenumber(length)
else:
high = (offset >> 8) + 2
if self.__pair:
high += 1
self.write_variablenumber(high)
low = offset & 0xFF
self.write_byte(low)
self.write_variablenumber(length - lengthdelta(offset))
self.__offset += length
self.__lastoffset = offset
self.__pair = False
return
def __shortblock(self, offset, length):
assert 2 <= length <= 3
assert 0 < offset <= 127
self.write_bitstring("110")
b = (offset << 1 ) + (length - 2)
self.write_byte(b)
self.__offset += length
self.__lastoffset = offset
self.__pair = False
return
def __singlebyte(self, offset):
assert 0 <= offset < 16
self.write_bitstring("111")
self.write_fixednumber(offset, 4)
self.__offset += 1
self.__pair = True
return
def __end(self):
self.write_bitstring("110")
self.write_byte(chr(0))
return
def do(self):
self.__literal(False)
while self.__offset < self.__length:
offset, length = find_longest_match(self.__in[:self.__offset],
self.__in[self.__offset:])
if length == 0:
c = self.__in[self.__offset]
if c == "\x00":
self.__singlebyte(0)
else:
self.__literal()
elif length == 1 and 0 <= offset < 16:
self.__singlebyte(offset)
elif 2 <= length <= 3 and 0 < offset <= 127:
self.__shortblock(offset, length)
elif 3 <= length and 2 <= offset:
self.__block(offset, length)
else:
self.__literal()
#raise ValueError("no parsing found", offset, length)
self.__end()
return self.getdata()
class decompress(_bits_decompress):
def __init__(self, data):
_bits_decompress.__init__(self, data, tagsize=1)
self.__pair = True # paired sequence
self.__lastoffset = 0
self.__functions = [
self.__literal,
self.__block,
self.__shortblock,
self.__singlebyte]
return
def __literal(self):
self.read_literal()
self.__pair = True
return False
def __block(self):
b = self.read_variablenumber() # 2-
if b == 2 and self.__pair : # reuse the same offset
offset = self.__lastoffset
length = self.read_variablenumber() # 2-
else:
high = b - 2 # 0-
if self.__pair:
high -= 1
offset = (high << 8) + ord(self.read_byte())
length = self.read_variablenumber() # 2-
length += lengthdelta(offset)
self.__lastoffset = offset
self.back_copy(offset, length)
self.__pair = False
return False
def __shortblock(self):
b = ord(self.read_byte())
if b <= 1: # likely 0
return True
length = 2 + (b & 0x01) # 2-3
offset = b >> 1 # 1-127
self.back_copy(offset, length)
self.__lastoffset = offset
self.__pair = False
return False
def __singlebyte(self):
offset = self.read_fixednumber(4) # 0-15
if offset:
self.back_copy(offset)
else:
self.read_literal('\x00')
self.__pair = True
return False
def do(self):
"""returns decompressed buffer and consumed bytes counter"""
self.read_literal()
while True:
# Allow for partial decryption with exception
try:
if self.__functions[self.read_setbits(3)]():
break
except Exception as e:
break
return self.out, self.getoffset()
if __name__ == "__main__":
# from kbp\test\aplib_test.py ######################################################################
assert decompress(compress("a").do()).do() == ("a", 3)
assert decompress(compress("ababababababab").do()).do() == ('ababababababab', 9)
assert decompress(compress("aaaaaaaaaaaaaacaaaaaa").do()).do() == ('aaaaaaaaaaaaaacaaaaaa', 11)
|
[
"haddoc@marlinspike.local"
] |
haddoc@marlinspike.local
|
5c6bdf9b95a8cda74e2d9d3ad5074f0a002e7c12
|
17427e84a6d3e8c5e1d363d7824fa9ded9e0498d
|
/create_figs/scripts/gaussian.py
|
9e040ecf99010b0fec87b52bb6819d679ff2c37d
|
[
"MIT"
] |
permissive
|
avishvj/ts_gen
|
b4a685266dbc109b13fceb16e87eb699241c9212
|
8877ce9115f13ff1d9c72513d4c37ae782e23b32
|
refs/heads/master
| 2023-06-06T23:21:28.830917
| 2021-07-06T23:20:09
| 2021-07-06T23:20:09
| 326,278,858
| 0
| 0
|
MIT
| 2021-01-02T22:31:38
| 2021-01-02T22:00:47
| null |
UTF-8
|
Python
| false
| false
| 8,269
|
py
|
import numpy as np
import openbabel
import pybel
class Gaussian(object):
"""
Class for reading data from Gaussian log files.
The attribute `input_file` represents the path where the input file for the
quantum job is located, the attribute `logfile` represents the path where
the log file containing the results is located, the attribute `chkfile`
represents the path where a checkpoint file for reading from a previous job
is located, and the attribute `output` contains the output of the
calculation in a list.
"""
def __init__(self, input_file=None, logfile=None, chkfile=None):
self.converge = True
self.input_file = input_file
self.logfile = logfile
self.chkfile = chkfile
if logfile is not None:
self.read()
else:
self.output = None
def read(self):
"""
Reads the contents of the log file.
"""
with open(self.logfile, 'r') as f:
self.output = f.read().splitlines()
if self.output[-1].split()[0] != 'Normal':
self.converge = False
@staticmethod
def _formatArray(a, b=3):
"""
Converts raw geometry or gradient array of strings, `a`, to a formatted
:class:`numpy.ndarray` of size N x 'b'. Only the rightmost 'b' values of
each row in `a` are retained.
"""
vec = np.array([])
for row in a:
vec = np.append(vec, [float(e) for e in row.split()[-b:]])
return vec.reshape(len(a), b)
def getNumAtoms(self):
"""
Extract and return number of atoms from Gaussian job.
"""
read = False
natoms = 0
i = 0
for line in self.output:
if read:
i += 1
try:
natoms = int(line.split()[0])
except ValueError:
if i > 5:
return natoms
continue
elif 'Input orientation' in line or 'Z-Matrix orientation' in line:
read = True
raise QuantumError('Number of atoms could not be found in Gaussian output')
def getEnergy(self):
"""
Extract and return energy (in Hartree) from Gaussian job.
"""
# Read last occurrence of energy
for line in reversed(self.output):
if 'SCF Done' in line:
energy = float(line.split()[4])
return energy
raise QuantumError('Energy could not be found in Gaussian output')
def getGradient(self):
"""
Extract and return gradient (forces) from Gaussian job. Results are
returned as an N x 3 array in units of Hartree/Angstrom.
"""
natoms = self.getNumAtoms()
# Read last occurrence of forces
for line_num, line in enumerate(reversed(self.output)):
if 'Forces (Hartrees/Bohr)' in line:
force_mat_str = self.output[-(line_num - 2):-(line_num - 2 - natoms)]
break
else:
raise QuantumError('Forces could not be found in Gaussian output')
gradient = - self._formatArray(force_mat_str) / constants.bohr_to_ang # Make negative to get gradient
return gradient
def getGeometry(self, atomType=True):
"""
Extract and return final geometry from Gaussian job. Results are
returned as an N x 3 array in units of Angstrom.
If atomType is true, returns N x 4 array with first column as
atomic number
"""
natoms = self.getNumAtoms()
# Read last occurrence of geometry
for line_num, line in enumerate(reversed(self.output)):
if 'Input orientation' in line or 'Z-Matrix orientation' in line:
coord_mat_str = self.output[-(line_num - 4):-(line_num - 4 - natoms)]
break
else:
raise QuantumError('Geometry could not be found in Gaussian output')
if atomType:
geometry = self._formatArray(coord_mat_str, b=5)
geometry = np.delete(geometry, 1, 1)
else:
geometry = self._formatArray(coord_mat_str, b=3)
return geometry
def getIRCpath(self):
"""
Extract and return IRC path from Gaussian job. Results are returned as
a list of tuples of N x 3 coordinate arrays in units of Angstrom and
corresponding energies in Hartrees. Path does not include TS geometry.
"""
for line in self.output:
if 'IRC-IRC' in line:
break
else:
raise QuantumError('Gaussian output does not contain IRC calculation')
natoms = self.getNumAtoms()
# Read IRC path (does not include corrector steps of last point if there was an error termination)
path = []
for line_num, line in enumerate(self.output):
if 'Input orientation' in line or 'Z-Matrix orientation' in line:
coord_mat = self._formatArray(self.output[line_num + 5:line_num + 5 + natoms])
elif 'SCF Done' in line:
energy = float(line.split()[4])
elif 'Forces (Hartrees/Bohr)' in line:
force_mat_str = self.output[line_num + 3:line_num + 3 + natoms]
gradient = - self._formatArray(force_mat_str) / constants.bohr_to_ang
elif 'NET REACTION COORDINATE UP TO THIS POINT' in line:
path.append((coord_mat, energy, gradient))
if not path:
raise QuantumError('IRC path is too short')
return path
def getNumImaginaryFrequencies(self):
"""
Extract and return the number of imaginary frequencies from a Gaussian
job.
"""
for line in self.output:
if 'imaginary frequencies' in line:
nimag = int(line.split()[1])
return nimag
raise QuantumError('Frequencies could not be found in Gaussian output')
def getCharge(self):
"""
Extract and return charge from Gaussian log file
"""
for line in self.output:
if 'Charge' in line:
charge = int(line.split()[2])
return charge
raise QuantumError('Charge could not be found in Gaussian output')
def getMultiplicity(self):
"""
Extract and return multiplicity from Gaussian log file
"""
for line in self.output:
if 'Multiplicity' in line:
charge = int(line.split()[5])
return charge
raise QuantumError('Multiplicity could not be found in Gaussian output')
def getFrequencies(self):
"""
Extract and return list of frequencies
"""
        freqs = []
        for line in self.output:
            parts = line.split()
            if parts and parts[0] == "Frequencies":  # guard against blank lines
                freqs.extend(parts[2:])
        return [float(freq) for freq in freqs]
def getPybelMol(self):
"""
        Converts a Gaussian output log file to a pybel mol
"""
return pybel.readfile('g09', self.logfile).next()
def getSmiles(self, all_single_bonds=False, delete_stereochem=False):
"""
Converts a Gaussian output log file to canonical SMILES with openbabel
"""
mol = self.getPybelMol()
if all_single_bonds:
for bond in pybel.ob.OBMolBondIter(mol.OBMol):
bond.SetBondOrder(1)
mol.write()
if delete_stereochem:
mol.OBMol.DeleteData(openbabel.StereoData)
return mol.write('smi').split()[0]
def getDistanceMatrix(self):
"""
Use extracted geometry from Gaussian log file to generate NxN distance matrix
"""
X = self.getGeometry(atomType=False)
Dsq = np.square(np.expand_dims(X, 1)-np.expand_dims(X, 0))
return np.sqrt(np.sum(Dsq, 2))
class constants():
    # class attributes (not methods), so that e.g. constants.bohr_to_ang
    # can be used directly as a number in the expressions above
    bohr_to_ang = 0.529177211
    hartree_to_kcal_per_mol = 627.5095
    kcal_to_J = 4184
class QuantumError(Exception):
"""dummy exception class for gaussian class"""
pass
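# A minimal usage sketch (the log file name below is hypothetical):
#   job = Gaussian(logfile='ts_opt.log')
#   if job.converge:
#       print job.getEnergy(), job.getNumImaginaryFrequencies()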
|
[
"av1017@ic.ac.uk"
] |
av1017@ic.ac.uk
|
7a2fcbba659bb83f947490fc946a7ff3ba4665d2
|
eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429
|
/data/input/ARMmbed/htrun/mbed_host_tests/host_tests_plugins/host_test_plugins.py
|
1c965fab88a3dc757f8bce97bec9d4293718641b
|
[] |
no_license
|
bopopescu/pythonanalyzer
|
db839453bde13bf9157b76e54735f11c2262593a
|
8390a0139137574ab237b3ff5fe8ea61e8a0b76b
|
refs/heads/master
| 2022-11-22T02:13:52.949119
| 2019-05-07T18:42:52
| 2019-05-07T18:42:52
| 282,079,884
| 0
| 0
| null | 2020-07-23T23:46:09
| 2020-07-23T23:46:08
| null |
UTF-8
|
Python
| false
| false
| 7,762
|
py
|
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.Wirkus@arm.com>
"""
import os
import sys
import platform
from os import access, F_OK
from sys import stdout
from time import sleep
from subprocess import call
class HostTestPluginBase:
""" Base class for all plugins used with host tests
"""
###########################################################################
# Interface:
###########################################################################
###########################################################################
# Interface attributes defining plugin name, type etc.
###########################################################################
name = "HostTestPluginBase" # Plugin name, can be plugin class name
type = "BasePlugin" # Plugin type: ResetMethod, Copymethod etc.
capabilities = [] # Capabilities names: what plugin can achieve
# (e.g. reset using some external command line tool)
required_parameters = [] # Parameters required for 'kwargs' in plugin APIs: e.g. self.execute()
stable = False # Determine if plugin is stable and can be used
###########################################################################
# Interface methods
###########################################################################
def setup(self, *args, **kwargs):
""" Configure plugin, this function should be called before plugin execute() method is used.
"""
return False
def execute(self, capability, *args, **kwargs):
"""! Executes capability by name
@param capability Capability name
@param args Additional arguments
@param kwargs Additional arguments
@details Each capability e.g. may directly just call some command line program or execute building pythonic function
@return Capability call return value
"""
return False
def is_os_supported(self, os_name=None):
"""!
        @return Returns true if the plugin works (is supported) under a certain OS
        @param os_name String describing the OS.
        See self.mbed_os_support() and self.mbed_os_info()
        @details In some cases a plugin will not work under a particular OS,
        mainly because the command / software used to implement the plugin
        functionality is not available e.g. on MacOS or Linux.
"""
return True
###########################################################################
# Interface helper methods - overload only if you need to have custom behaviour
###########################################################################
def print_plugin_error(self, text):
"""! Function prints error in console and exits always with False
@param text Text to print
"""
print "Plugin error: %s::%s: %s"% (self.name, self.type, text)
return False
def print_plugin_info(self, text, NL=True):
"""! Function prints notification in console and exits always with True
@param text Text to print
@param NL Newline will be added behind text if this flag is True
"""
if NL:
print "Plugin info: %s::%s: %s"% (self.name, self.type, text)
else:
print "Plugin info: %s::%s: %s"% (self.name, self.type, text),
return True
def print_plugin_char(self, char):
""" Function prints char on stdout
"""
stdout.write(char)
stdout.flush()
return True
def check_mount_point_ready(self, destination_disk, init_delay=0.2, loop_delay=0.25):
"""! Waits until destination_disk is ready and can be accessed by e.g. copy commands
@return True if mount point was ready in given time, False otherwise
@param destination_disk Mount point (disk) which will be checked for readiness
@param init_delay - Initial delay time before first access check
@param loop_delay - polling delay for access check
"""
result = False
# Let's wait for 30 * loop_delay + init_delay max
if not access(destination_disk, F_OK):
self.print_plugin_info("Waiting for mount point '%s' to be ready..."% destination_disk, NL=False)
sleep(init_delay)
for i in range(30):
if access(destination_disk, F_OK):
result = True
break
sleep(loop_delay)
self.print_plugin_char('.')
return result
def check_parameters(self, capability, *args, **kwargs):
"""! This function should be ran each time we call execute() to check if none of the required parameters is missing
@return Returns True if all parameters are passed to plugin, else return False
@param capability Capability name
@param args Additional parameters
@param kwargs Additional parameters
"""
missing_parameters = []
for parameter in self.required_parameters:
if parameter not in kwargs:
missing_parameters.append(parameter)
if len(missing_parameters):
self.print_plugin_error("execute parameter(s) '%s' missing!"% (', '.join(missing_parameters)))
return False
return True
def run_command(self, cmd, shell=True):
"""! Runs command from command line.
@param cmd Command to execute
@param shell True if shell command should be executed (eg. ls, ps)
@details Function prints 'cmd' return code if execution failed
@return True if command successfully executed
"""
result = True
try:
ret = call(cmd, shell=shell)
if ret:
self.print_plugin_error("[ret=%d] Command: %s"% (int(ret), cmd))
return False
except Exception as e:
result = False
self.print_plugin_error("[ret=%d] Command: %s"% (int(ret), cmd))
self.print_plugin_error(str(e))
return result
def mbed_os_info(self):
"""! Returns information about host OS
@return Returns tuple with information about OS and host platform
"""
result = (os.name,
platform.system(),
platform.release(),
platform.version(),
sys.platform)
return result
def mbed_os_support(self):
"""! Function used to determine host OS
@return Returns None if host OS is unknown, else string with name
@details This function should be ported for new OS support
"""
result = None
os_info = self.mbed_os_info()
if (os_info[0] == 'nt' and os_info[1] == 'Windows'):
result = 'Windows7'
elif (os_info[0] == 'posix' and os_info[1] == 'Linux' and ('Ubuntu' in os_info[3])):
result = 'Ubuntu'
elif (os_info[0] == 'posix' and os_info[1] == 'Linux'):
result = 'LinuxGeneric'
elif (os_info[0] == 'posix' and os_info[1] == 'Darwin'):
result = 'Darwin'
return result
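# A minimal concrete plugin sketch built on the base class above (the plugin
# name, capability and parameter are illustrative, not part of the real
# htrun plugin set):
class HostTestPluginEcho(HostTestPluginBase):
    name = "HostTestPluginEcho"
    type = "ResetMethod"
    capabilities = ["echo"]
    required_parameters = ["message"]
    stable = True

    def setup(self, *args, **kwargs):
        return True

    def execute(self, capability, *args, **kwargs):
        if not self.check_parameters(capability, *args, **kwargs):
            return False
        return self.run_command('echo "%s"' % kwargs["message"])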
|
[
"rares.begu@gmail.com"
] |
rares.begu@gmail.com
|
49fb226b675d489572ed2baae6a5f8788986cf41
|
cf71ad17bc20b3b9fd5a8c5104512acbe9da813d
|
/slab/experiments/Hafnium/ExpLib/seq_expt_new.py
|
ad314bb754df8692de1782343d1370d845d36555
|
[
"MIT"
] |
permissive
|
SchusterLab/slab
|
8c62c3246f29a500ad75602185eb703fcaa30e34
|
b991c7de1dd5ded42c55dbc40fdf2f4de289db54
|
refs/heads/master
| 2023-01-29T06:29:35.621657
| 2022-09-09T18:10:08
| 2022-09-09T18:10:08
| 55,183,085
| 8
| 9
| null | 2021-08-06T17:21:52
| 2016-03-31T21:08:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,195
|
py
|
__author__ = 'Nelson'
from slab.experiments.General.run_experiment import *
from slab.experiments.Multimode.run_multimode_experiment import *
import numbers
import collections  # update_dict below relies on collections.Mapping
from slab import *
import json
import gc
def update_dict(d, u):
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = update_dict(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
class SequentialExperiment():
def __init__(self, lp_enable = True):
self.expt = None
self.lp_enable = lp_enable
# config_file = 'config.json'
# datapath = os.getcwd()
def run(self,expt_name,vary_dict={}, expt_kwargs = {}):
if self.expt is not None:
del self.expt
gc.collect()
datapath = os.getcwd() + '\data'
config_file = os.path.join(datapath, "..\\config" + ".json")
with open(config_file, 'r') as fid:
cfg_str = fid.read()
cfg_dict = json.loads(cfg_str)
cfg_dict_temp = update_dict(cfg_dict, vary_dict)
with open('config_temp.json', 'w') as fp:
json.dump(cfg_dict_temp, fp)
## automatically save kwargs to data_file
if 'data_file' in expt_kwargs:
data_file = expt_kwargs['data_file']
for key in expt_kwargs:
if isinstance(expt_kwargs[key],numbers.Number) and not key == 'update_config':
with SlabFile(data_file) as f:
f.append_pt(key, float(expt_kwargs[key]))
f.close()
if 'seq_pre_run' in expt_kwargs:
expt_kwargs['seq_pre_run'](self)
self.expt = run_experiment(expt_name,self.lp_enable,config_file = '..\\config_temp.json',**expt_kwargs)
if self.expt is None:
self.expt = run_multimode_experiment(expt_name,self.lp_enable,**expt_kwargs)
if 'seq_post_run' in expt_kwargs:
expt_kwargs['seq_post_run'](self)
if 'update_config' in expt_kwargs:
if expt_kwargs['update_config']:
self.save_config()
def save_config(self):
self.expt.save_config()
print("config saved!")
|
[
"david.schuster@gmail.com"
] |
david.schuster@gmail.com
|
a43f87a27d0d128c1a43e0aa7aeecc80d141900e
|
388a5a0371835cc0b57761cab40a83c20f9aefdb
|
/xici_spider.py
|
66112f4af3a5bba22328fa5d4fe2cbdb09e91e8d
|
[] |
no_license
|
ricardozhang1/crawl_xici_ip
|
96fefa7ce7d7c3d2cc97871aec2f0f396646854f
|
2ef5c8f57b81b07abd9d571ce9e7d1678dc0324d
|
refs/heads/master
| 2021-05-16T15:02:04.588173
| 2018-03-22T04:00:15
| 2018-03-22T04:00:15
| 118,891,651
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,535
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import MySQLdb
import time
from scrapy import Selector
from utils.common import get_md5
user_agent = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36"
headers = {
'User-Agent': user_agent
}
def crawl_ip(url):
response = requests.get(url,headers=headers)
a = []
d = []
responses = Selector(text=response.text)
all_node = responses.css('#ip_list tr')
for i in all_node:
l = i.css('td::text').extract()
b = len(l)
if b != 0:
a.append((l[0], l[1], l[5]))
h = i.css('.bar::attr(title)').extract()
c = len(h)
if c != 0:
d.append((h[0].replace('秒', '')))
return a,d
def mysql_store(a,d):
print(type(a),type(d))
pass
conn = MySQLdb.connect(host="127.0.0.1",user="root",passwd="1234",db="mysql_test_01",charset='utf8')
cursor = conn.cursor()
for i in range(100):
cursor.execute(
"""INSERT INTO text_02(ip, port, proxy_type,ip_haxi,speed) VALUES ('{0}','{1}','{2}','{3}','{4}')""".format(a[i][0],a[i][1],a[i][2],get_md5(a[i][0]+a[i][1]+str(time.time())+a[i][2]),d[i])
)
conn.commit()
conn.close()
if __name__ == '__main__':
for i in range(1999):
url = "http://www.xicidaili.com/nn/{0}".format(i+1)
q,h = crawl_ip(url)
mysql_store(q,h)
|
[
"noreply@github.com"
] |
ricardozhang1.noreply@github.com
|
45be5e89063ddcadd087f81db42d07dfaf520bff
|
cb056e2250f0130c0d0b965f2d6dd88ef8f74506
|
/train.py
|
599073aeeefb0f5c50bbba69c8f55b2494671b77
|
[] |
no_license
|
Dy1anT/AI-Sec-homework
|
2da9b2b942634a1a3e3d8f0494eadda4f90049fc
|
8fa8144d3b455a9b924dc071aa30a39d84d68e25
|
refs/heads/main
| 2023-08-16T20:16:44.397446
| 2021-10-08T04:37:46
| 2021-10-08T04:37:46
| 410,306,343
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,680
|
py
|
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import argparse
import time
from model import ResNet18, Vgg16_Net
from utils import *
parser = argparse.ArgumentParser(description='AI Sec homework_1')
parser.add_argument('--dataset', type=str, default='cifar10', help='cifar-10')
parser.add_argument('--epochs', type=int, default=40, help="epochs")
parser.add_argument('--lr', type=float, default=0.1, help="learning rate")
parser.add_argument('--batch_size', type=int, default=128, help="batch size")
parser.add_argument('--momentum', type=float, default=0.9, help="momentum")
parser.add_argument('--weight_decay', type=float, default=5e-4, help="L2")
parser.add_argument('--scheduler', type=int, default=1, help="lr scheduler")
parser.add_argument('--seed', type=int, default=0, help="lucky number")
args = parser.parse_args()
seed = args.seed
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
device = "cpu"
if torch.cuda.is_available():
device = "cuda:0"
print("Running on %s" % device)
#net = ResNet18().to(device)
net = Vgg16_Net().to(device)
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(root='data/cifar10', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True)
testset = torchvision.datasets.CIFAR10(root='data/cifar10', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
if args.scheduler:
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20, 30], gamma=0.1)
x = []
loss_list = []
train_acc_list = []
test_acc_list = []
iter_count = 0
start_time = time.time()
for epoch in range(args.epochs):
print('epoch: %d' % (epoch + 1))
net.train()
for i, data in enumerate(trainloader, 0):
inputs, labels = data
optimizer.zero_grad()
pred = net(inputs.to(device))
loss = criterion(pred, labels.to(device))
loss.backward()
optimizer.step()
if iter_count % 100 == 0:
train_acc = test_accuracy(trainloader, net, 1000)
test_acc = test_accuracy(testloader, net, 1000)
print("iter:",iter_count,"loss: %.3f train acc: %.3f test acc: %.3f" % (loss.item(), train_acc, test_acc))
x.append(iter_count)
loss_list.append(loss.item())
train_acc_list.append(train_acc)
test_acc_list.append(test_acc)
iter_count += 1
if args.scheduler:
scheduler.step()
plt.plot(x, train_acc_list, label = "train_acc")
plt.plot(x, test_acc_list, label = "test_acc")
plt.grid()
plt.legend()
plt.xlabel("iterations")
plt.ylabel("accuracy")
plt.savefig("img/train_acc.png")
plt.close("all")
plt.plot(x, loss_list)
plt.grid()
plt.xlabel("iterations")
plt.ylabel("loss")
plt.savefig("img/train_loss.png")
plt.close("all")
train_time = time.time() - start_time
print("train time:", train_time)
|
[
"627756747@qq.com"
] |
627756747@qq.com
|
5499c1d27c8ce008320d45fd388a3f8319c2ff87
|
9ab1cecff161cec8606165dc614bbfcb9bda2745
|
/fnt2xml.py
|
26ad74360718341adcb163181d42190317bb40d7
|
[] |
no_license
|
wowgeeker/fnt2xml
|
4fb8ad532eecfb3299842fad46ff9080adcd317e
|
72dab5bf6113b118789f5a0dbaef056ec0451568
|
refs/heads/master
| 2020-03-22T19:48:07.874948
| 2018-07-11T09:29:57
| 2018-07-11T09:29:57
| 140,551,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,789
|
py
|
# -*- coding:utf-8 -*-
# author: nobody
# createdAt: 2018/7/11
input = 'jiesuan_num.fnt'
arr = input.split('.')
arr[0] += 'xml'
output = '.'.join(arr)
header = '<?xml version="1.0"?>'
import xml.etree.ElementTree as etree
root = etree.Element("font")
info = etree.SubElement(root, "info")
common = etree.SubElement(root, "common")
pages = etree.SubElement(root, "pages")
page = etree.SubElement(pages, "page")
page.attrib['id'] = '0'
page.attrib['file'] = input.split('.')[0] + '.png'
chars = etree.SubElement(root, "chars")
tree = etree.ElementTree(root)
with open(input) as fr:
originfnt = filter(lambda x: x, fr.readlines())
def parseline(line):
line = line.strip()
parts = line.split(' ')
tag = parts[0].strip()
attrs = filter(lambda x:len(x)==2, [p.strip().split('=') for p in parts[1:]])
for a in attrs:
print a
if a[1].startswith('"') and a[1].endswith('"'):
a[1] = a[1][1:-1]
return tag, attrs
for line in originfnt:
if line.startswith('info'):
tag, attrs = parseline(line)
for a in attrs:
info.attrib[a[0]] = a[1]
if line.startswith('common'):
tag, attrs = parseline(line)
for a in attrs:
common.attrib[a[0]] = a[1]
if line.startswith('page'):
tag, attrs = parseline(line)
for a in attrs:
page.attrib[a[0]] = a[1]
if line.startswith('chars '):
tag, attrs = parseline(line)
for a in attrs:
chars.attrib[a[0]] = a[1]
if line.startswith('char '):
tag, attrs = parseline(line)
char = etree.SubElement(chars, "char")
for a in attrs:
char.attrib[a[0]] = a[1]
s = '\n'.join([header, etree.tostring(root)])
with open(output, 'w') as fw:
fw.write(s)
|
[
"whuzouming@gmail.com"
] |
whuzouming@gmail.com
|
5957be3eebf4bcc847582b8b20f6771924155403
|
4c9580b2e09e2b000e27a1c9021b12cf2747f56a
|
/chapter05/chapter05_example01/chapter05_example01/settings.py
|
6bdae198873c43cd8667c6b9aac8266fb69c6642
|
[] |
no_license
|
jzplyy/xiaoyue_mall
|
69072c0657a6878a4cf799b8c8218cc7d88c8d12
|
4f9353d6857d1bd7dc54151ca8b34dcb4671b8dc
|
refs/heads/master
| 2023-06-26T02:48:03.103635
| 2021-07-22T15:51:07
| 2021-07-22T15:51:07
| 388,514,311
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,368
|
py
|
"""
Django settings for chapter05_example01 project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(BASE_DIR, 'chapter05_example01\\apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gj(^a0w1e_)p4_+%9y4q3i#7yz_423=^ze4+9-wpj!8sci=esy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'goods'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chapter05_example01.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chapter05_example01.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"jzplyy@126.com"
] |
jzplyy@126.com
|
6a109be858f97838ec1b218c45ba9290bf5caf36
|
dcc611cfe2f8525ade12bd2238748b0b036e6c47
|
/pages/views.py
|
a5a07b7d93052b198aafedb08b1de868bcf67ecb
|
[] |
no_license
|
tjtiv/SupplyFinder
|
7df4efdb593a5c14697af6d1c3abe581870f0a21
|
313e8e78fc5ac273302152ff8e9386e1ae9cb943
|
refs/heads/master
| 2022-11-27T08:01:46.171098
| 2020-08-01T03:04:59
| 2020-08-01T03:04:59
| 282,994,167
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 125
|
py
|
from django.shortcuts import render
def home_view(request, *args, **kwargs):
    return render(request, "index.html", {})
|
[
"ivtjt78@gmail.com"
] |
ivtjt78@gmail.com
|
e50221e7704c68fda8b8c2c1e3ea75b25e730961
|
94d9bdd6d6a1b7a7859b0b828c9592cf7f38cbc2
|
/Python/SciPy/OperationsBetweenMatrices.py
|
9799527b9b8667a076a4725dfdbab120e3731df8
|
[] |
no_license
|
EdelKearney/FileDump
|
afdb731a6dfdb21d789ebee29c12fb3812ddcf4a
|
97399f0909b824c6b9fd5661a81614ed9b71cd25
|
refs/heads/master
| 2020-09-08T19:21:05.301808
| 2019-11-12T13:15:34
| 2019-11-12T13:15:34
| 221,220,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 237
|
py
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy
import scipy.linalg
mu = 1/numpy.sqrt(2)
A = numpy.matrix([[mu, 0, mu], [0,1,0], [mu, 0, -mu]])
B = scipy.linalg.kron(A,A)
print(B[:,0:-1:2])
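# B = kron(A, A) is the 9x9 Kronecker product of the 3x3 matrix A with
# itself; B[:, 0:-1:2] keeps every second column (0, 2, 4, 6), dropping the last.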
|
[
"A00245506@student.ait.ie"
] |
A00245506@student.ait.ie
|
ef8495ed987b371d3c9c09347e179d7fee0cfd92
|
6320fef2ea7376c2b35f97f1a5af004e90f09098
|
/1-2주차 실습(복습)/venv/Lib/site-packages/pygame/tests/test_utils/__init__.py
|
fd3ec69cb674929081e3d41837df1458fa33d018
|
[] |
no_license
|
Dplo1514/ploaistudy
|
7aa08d7f71653748a9e32dcc09ee8f6cec0aaed9
|
e35e42b1e5f0c90cc1e2a59993a1ef73d8872d0c
|
refs/heads/master
| 2023-09-03T00:45:55.601651
| 2021-10-24T12:19:38
| 2021-10-24T12:19:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,442
|
py
|
#################################### IMPORTS ###################################
is_pygame_pkg = __name__.startswith("pygame.tests.")
import tempfile, sys, pygame, time, os
################################################################################
# Python 3.x compatibility
try:
xrange_ = xrange
except NameError:
xrange_ = range
try:
raw_input_ = raw_input
except NameError:
raw_input_ = input
if sys.version_info[0] == 3:
def tostring(row):
"""Convert row of bytes to string. Expects `row` to be an
``array``.
"""
return row.tobytes()
else:
def tostring(row):
"""Convert row of bytes to string. Expects `row` to be an
``array``.
"""
return row.tostring()
import unittest
if not hasattr(unittest.TestCase, "subTest"):
import contextlib
@contextlib.contextmanager
def subTest(self, msg=None, **params):
yield
return
unittest.TestCase.subTest = subTest
def geterror():
return sys.exc_info()[1]
class AssertRaisesRegexMixin(object):
"""Provides a way to prevent DeprecationWarnings in python >= 3.2.
For this mixin to override correctly it needs to be before the
unittest.TestCase in the multiple inheritance hierarchy.
e.g. class TestClass(AssertRaisesRegexMixin, unittest.TestCase)
This class/mixin and its usage can be removed when pygame no longer
supports python < 3.2.
"""
def assertRaisesRegex(self, *args, **kwargs):
try:
return super(AssertRaisesRegexMixin, self).assertRaisesRegex(
*args, **kwargs
)
except AttributeError:
try:
return super(AssertRaisesRegexMixin, self).assertRaisesRegexp(
*args, **kwargs
)
except AttributeError:
self.skipTest("No assertRaisesRegex/assertRaisesRegexp method")
################################################################################
this_dir = os.path.dirname(os.path.abspath(__file__))
trunk_dir = os.path.split(os.path.split(this_dir)[0])[0]
if is_pygame_pkg:
test_module = "tests"
else:
test_module = "test"
def trunk_relative_path(relative):
return os.path.normpath(os.path.join(trunk_dir, relative))
def fixture_path(path):
return trunk_relative_path(os.path.join(test_module, "fixtures", path))
def example_path(path):
return trunk_relative_path(os.path.join("examples", path))
sys.path.insert(0, trunk_relative_path("."))
################################## TEMP FILES ##################################
def get_tmp_dir():
return tempfile.mkdtemp()
################################################################################
def question(q):
return raw_input_("\n%s (y/n): " % q.rstrip(" ")).lower().strip() == "y"
def prompt(p):
return raw_input_("\n%s (press enter to continue): " % p.rstrip(" "))
#################################### HELPERS ###################################
def rgba_between(value, minimum=0, maximum=255):
if value < minimum:
return minimum
elif value > maximum:
return maximum
else:
return value
def combinations(seqs):
"""
Recipe 496807 from ActiveState Python CookBook
Non recursive technique for getting all possible combinations of a sequence
of sequences.
"""
r = [[]]
for x in seqs:
r = [i + [y] for y in x for i in r]
return r
def gradient(width, height):
"""
Yields a pt and corresponding RGBA tuple, for every (width, height) combo.
Useful for generating gradients.
Actual gradient may be changed, no tests rely on specific values.
Used in transform.rotate lossless tests to generate a fixture.
"""
for l in xrange_(width):
for t in xrange_(height):
yield (l, t), tuple(map(rgba_between, (l, t, l, l + t)))
def rect_area_pts(rect):
for l in xrange_(rect.left, rect.right):
for t in xrange_(rect.top, rect.bottom):
yield l, t
def rect_perimeter_pts(rect):
"""
Returns pts ((L, T) tuples) encompassing the perimeter of a rect.
The order is clockwise:
topleft to topright
topright to bottomright
bottomright to bottomleft
bottomleft to topleft
Duplicate pts are not returned
"""
clock_wise_from_top_left = (
[(l, rect.top) for l in xrange_(rect.left, rect.right)],
[(rect.right - 1, t) for t in xrange_(rect.top + 1, rect.bottom)],
[(l, rect.bottom - 1) for l in xrange_(rect.right - 2, rect.left - 1, -1)],
[(rect.left, t) for t in xrange_(rect.bottom - 2, rect.top, -1)],
)
for line in clock_wise_from_top_left:
for pt in line:
yield pt
def rect_outer_bounds(rect):
"""
Returns topleft outerbound if possible and then the other pts, that are
"exclusive" bounds of the rect
?------O
|RECT| ?|0)uterbound
|----|
O O
"""
return ([(rect.left - 1, rect.top)] if rect.left else []) + [
rect.topright,
rect.bottomleft,
rect.bottomright,
]
def import_submodule(module):
m = __import__(module)
for n in module.split(".")[1:]:
m = getattr(m, n)
return m
class SurfaceSubclass(pygame.Surface):
"""A subclassed Surface to test inheritance."""
def __init__(self, *args, **kwargs):
super(SurfaceSubclass, self).__init__(*args, **kwargs)
self.test_attribute = True
def test():
"""
Lightweight test for helpers
"""
r = pygame.Rect(0, 0, 10, 10)
assert rect_outer_bounds(r) == [(10, 0), (0, 10), (10, 10)] # tr # bl # br
assert len(list(rect_area_pts(r))) == 100
r = pygame.Rect(0, 0, 3, 3)
assert list(rect_perimeter_pts(r)) == [
(0, 0),
(1, 0),
(2, 0), # tl -> tr
(2, 1),
(2, 2), # tr -> br
(1, 2),
(0, 2), # br -> bl
(0, 1), # bl -> tl
]
print("Tests: OK")
################################################################################
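# Allow running this module's lightweight self-test directly (a small
# convenience sketch; the package itself only imports these helpers):
if __name__ == "__main__":
    test()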
|
[
"dladlsgur3334@gmail.com"
] |
dladlsgur3334@gmail.com
|
20fa377820e22a72852c0ce92c6e7a078111b216
|
cf28af03915775943c5ba087f4a641d9bd7a67c4
|
/KNN_from_scratch.py
|
10a16e935b92110e9eae061c9cfc1d44e06c8b6d
|
[] |
no_license
|
c-lyons/data-science-from-scratch
|
a38c918545aad5403d4022dc416b76f632d4b592
|
409cad0e2438cea12b3ad8f1e78f00982ade628d
|
refs/heads/main
| 2023-04-03T14:22:41.883304
| 2021-04-24T14:43:10
| 2021-04-24T14:43:10
| 361,186,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,388
|
py
|
from sortedcontainers import SortedList
import numpy as np
import pandas as pd
from datetime import datetime
def get_data(limit=None):
print("Reading image data and transforming...")
df = pd.read_csv('data/mnist_sample.csv')
data = df.to_numpy()
np.random.shuffle(data)
X = data[:, 1:] / 255 # normalising pixel values from 0->1
Y = data[:, 0]
if limit is not None:
X, Y = X[:limit], Y[:limit]
return X, Y
class KNN(object):
    def __init__(self, k):
self.k = k
def fit(self, X, y):
self.X = X
self.y = y
def predict(self, X):
y = np.zeros(len(X)) # initializing y column as zeros array of size X
for i, x in enumerate(X):
sl = SortedList() # predefining sorted list to size K
for j, x_train in enumerate(self.X):
diff = x - x_train
d = diff.dot(diff) # squared difference using dot
if len(sl) < self.k: # if less than K neighbours, add to list
sl.add((d, self.y[j]))
elif d < sl[-1][0]:
del sl[-1]
sl.add((d, self.y[j]))
votes = {} # empty dict for votes
for k, v in sl:
votes[v] = votes.get(v, 0) + 1 # counting votes for v in sortedlist
max_votes = 0
max_votes_class = -1
for v, count in votes.items():
if count > max_votes:
max_votes = count
max_votes_class = v
y[i] = max_votes_class
return y
def score(self, X, y):
P = self.predict(X)
return np.mean(P==y) # return mean of prediction == true label
if __name__ == '__main__':
X, y = get_data(limit=5000)
n_train = 4000
X_train, y_train = X[:n_train], y[:n_train]
X_test, y_test = X[n_train:], y[n_train:]
for k in range(1, 10):
knn = KNN(k)
t0 = datetime.now()
knn.fit(X_train, y_train)
print('Training time: ', (datetime.now()-t0))
t0 = datetime.now()
print('Training Accuracy: ', knn.score(X_train, y_train))
print('Time to compute train accuracy: ', (datetime.now()-t0))
t0 = datetime.now()
print('Testing Accuracy: ', knn.score(X_test, y_test))
print('Time to compute test accuracy: ', (datetime.now()-t0))
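    # Note: each score() call performs O(n_eval * n_train) distance
    # computations, which is why scoring dominates the timings above.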
|
[
"lyonsconor@hotmail.com"
] |
lyonsconor@hotmail.com
|
53f85d5e77b251fd803da0cc317dc6dac3e3fd02
|
b74e9be747c1a99fc5d67ca096157f512baf02ca
|
/tools/harness-automation/cases/reed_5_2_4.py
|
151fa67c0fb6cee9b74e8e983fa7bb67a1aea761
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
amccool/openthread
|
468838cebc083d234192926aacb0e3efc0a83463
|
1e9d3c1dbfd66aa48c4cbb1dda0b41c9f05fefc7
|
refs/heads/master
| 2021-01-16T23:03:46.503666
| 2016-09-06T03:21:05
| 2016-09-06T03:21:05
| 67,469,844
| 0
| 0
|
BSD-3-Clause
| 2019-11-01T20:11:16
| 2016-09-06T03:23:32
|
C++
|
UTF-8
|
Python
| false
| false
| 1,846
|
py
|
#!/usr/bin/env python
#
# Copyright (c) 2016, Nest Labs, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class REED_5_2_4(HarnessCase):
suite = 16
case = '5 2 4'
golden_devices_required = 17
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
|
[
"jonhui@nestlabs.com"
] |
jonhui@nestlabs.com
|
c0825be943e0bd4e2f219983d3ffeb0a9f9066ac
|
5356098b5871eeded77626b9176f6cab3ea2bb66
|
/Sample 14 -DATA Structures (SET).py
|
7ac844d02c8ad4b70976165d7157398e74a969dc
|
[] |
no_license
|
gxsoft/SkillShare---Python-3--Programming-in-Py-for-Beginners
|
ed6522489c2c608f2130d737930210ba80c4dcfd
|
3908482c612e27f87f7111fc29ea86ceb3d6340c
|
refs/heads/master
| 2020-05-17T12:12:02.901323
| 2019-05-11T14:35:18
| 2019-05-11T14:35:18
| 183,705,343
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
"""
Sample 14 -DATA Structures (SET).py
# ------------------------------------------
# Whats is SET?
#
Unique |
LISTS
TUPLES
DICTIONARIES
SETS
"""
A = {40, -2, 20, 13}
B = {4, 7, 10, 20}
print(A|B)
A.add(24)
print(A)
print(sorted(A))
A = [40, -2, 20, 13, 40]
print(A)
A = set([40, -2, 20, 13, 40])
""""
REMOVE, DISCARD
# A.remove(40)
""""
A.discard(-2)
print(A)
# A.remove(40)
print(A)
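# A couple more set operations for illustration:
print(A & B)   # intersection -> set([20])
print(A - B)   # difference: elements of A that are not in B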
|
[
"github@gxgro.com.ar"
] |
github@gxgro.com.ar
|
7b5c47ce2f3ccc6f8806a4f85dae1a998c6e8e45
|
03adcb1d526cb4dbaad05fbf140337a861fb3618
|
/agent/forms.py
|
d4514300502d7f78e4c53ed12dddd4f681fc081f
|
[] |
no_license
|
manish5122000/rc_loan
|
d55eb03b6924c9f4de4b3cfbeb98ac524fb72832
|
079cd824238d3703982d14178028eaeedcc109b5
|
refs/heads/main
| 2023-06-30T20:12:24.595517
| 2021-08-02T18:52:51
| 2021-08-02T18:52:51
| 392,059,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 979
|
py
|
from django import forms
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm, UsernameField, PasswordChangeForm, PasswordResetForm, SetPasswordForm
from django.contrib.auth.models import User
from adminss.models import Roles
from django.contrib.auth import password_validation
from django.utils.translation import gettext, gettext_lazy as _
#Agent Registration Form
class AgentRegistrationForm(UserCreationForm):
password1 = forms.CharField(label='Password', widget=forms.PasswordInput(attrs={'class':"form-control"}))
password2 = forms.CharField(label='Confirm Password', widget=forms.PasswordInput(attrs={'class':"form-control"}))
email = forms.CharField(required=True, widget=forms.EmailInput(attrs={'class':"form-control"}))
class Meta:
model = User
fields = ['username','email','password1','password2']
        # Django expects the plural Meta options 'labels' and 'widgets';
        # the singular names were silently ignored
        labels = {'email': 'Email'}
        widgets = {'username': forms.TextInput(attrs={'class': 'form-control'})}
|
[
"mauryamanish5122000@gmail.com"
] |
mauryamanish5122000@gmail.com
|
1839dffbf6f70b8e7aec0547cae7155ba0fad1b2
|
2a27a371c9effff38e2a070b603de93167d918de
|
/Problem7.py
|
f470f1482f3567c4dca9817f2c7c8f5c19141db0
|
[] |
no_license
|
tikipatel/Project-Euler
|
c25e4c9639014b512305d9d3590105bb0cc95854
|
277ba988c4d65d7b70f7d8350303982043a3322d
|
refs/heads/master
| 2016-09-15T11:16:42.262177
| 2016-04-29T03:34:52
| 2016-04-29T03:34:52
| 26,097,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
def is_prime(n):
if n <= 3:
return n >= 2
if n % 2 == 0 or n % 3 == 0:
return False
for i in range(5, int(n ** 0.5) + 1, 6):
if n % i == 0 or n % (i + 2) == 0:
return False
return True
counter = 1
increment_me = 1
while(counter != 10001):
increment_me += 2
if(is_prime(increment_me)):
counter += 1
print(increment_me)
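# Expected output: 104743 (the 10001st prime).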
|
[
"pratikpatel@me.com"
] |
pratikpatel@me.com
|
16bae639bb18e64b6e9238ba7f2510be78015c6e
|
99acd2459a58dd1ee549128f70044331f4178dd8
|
/storebackend/product/views.py
|
80cbd012255b74c2f77ca9d9b8915f3a2c834421
|
[] |
no_license
|
AaronM18/e-commerce-test
|
355992a1fa985533418acc8ce0b9a22bfd195515
|
040495bd5c730fbfe3e45a7e6e9e88150db478c1
|
refs/heads/main
| 2023-04-27T23:30:51.082915
| 2021-05-20T04:15:38
| 2021-05-20T04:15:38
| 363,311,350
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,695
|
py
|
from django.shortcuts import render
from django.http import Http404
from django.db.models import Q
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.decorators import api_view
from .models import Product, Category
from .serializer import ProductSerializer, CategorySerializer
class LatestProductsList(APIView):
def get(self, request, format=None):
products = Product.objects.all()[0:4]
serializer = ProductSerializer(products, many=True)
return Response(serializer.data)
class ProductDetail(APIView):
def get_object(self, category_slug, product_slug):
try:
return Product.objects.filter(category__slug=category_slug).get(slug=product_slug)
except Product.DoesNotExist:
raise Http404
def get(self, request, category_slug, product_slug, format=None):
product = self.get_object(category_slug, product_slug)
serializer = ProductSerializer(product)
return Response(serializer.data)
class CategoryDetail(APIView):
def get_object(self, category_slug):
try:
return Category.objects.get(slug=category_slug)
except Category.DoesNotExist:
raise Http404
def get(self, request, category_slug, format=None):
        category = self.get_object(category_slug)
        serializer = CategorySerializer(category)
return Response(serializer.data)
@api_view(['POST'])
def search(request):
query = request.data.get('query', '')
if query:
products = Product.objects.filter(Q(name__icontains=query) | Q(description__icontains=query))
serializer = ProductSerializer(products, many=True)
return Response(serializer.data)
else:
return Response({'Products': []})
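# Illustrative request against the search view above (the actual route is
# defined in urls.py, which is not shown here):
#   POST .../search/  {"query": "winter"}
# returns serialized products whose name or description contains the query.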
|
[
"aaron18mo@gmail.com"
] |
aaron18mo@gmail.com
|
913a809b21dce8f948f0e742c823d688bef2cbc7
|
6032f996f989d521dbdee23ce6c1fbd778d8e964
|
/qanta/wikipedia/categories.py
|
b5dedd32b88990ad251a82da1ae4cf7fe424ea37
|
[
"MIT"
] |
permissive
|
npow/qb
|
9af1c07afd10f6aad9dbcbdd9209c6fde0e4347f
|
044e623d2cbda96209fa1fdedffefa2208c98755
|
refs/heads/master
| 2020-05-26T15:41:13.864334
| 2019-05-26T16:47:07
| 2019-05-26T16:47:07
| 188,290,907
| 0
| 0
| null | 2019-05-23T19:02:23
| 2019-05-23T19:02:23
| null |
UTF-8
|
Python
| false
| false
| 1,525
|
py
|
"""
Process Wikipedia category links
"""
import json
import re
import csv
import click
import tqdm
@click.group()
def categorylinks_cli():
pass
@categorylinks_cli.command()
@click.argument('categories_csv')
@click.argument('out_jsonl')
def clean(categories_csv, out_jsonl):
with open(categories_csv) as in_f, open(out_jsonl, 'w') as out_f:
for line in csv.reader(in_f):
if len(line) == 2:
if re.match(r'[a-zA-Z0-9\-\_\s]+', line[1]):
out_f.write(json.dumps({
'id': int(line[0]),
'cat': line[1]
}))
out_f.write('\n')
@categorylinks_cli.command()
@click.argument('category_csv')
@click.argument('out_json')
def disambiguate(category_csv, out_json):
disambiguation_pages = set()
blacklist = {
'Articles_with_links_needing_disambiguation_from_April_2018',
'All_articles_with_links_needing_disambiguation'
}
with open(category_csv) as f:
reader = csv.reader(f)
for r in tqdm.tqdm(reader, mininterval=1):
page_id, category = r[0], r[1]
l_category = category.lower()
if ((category not in blacklist) and
('disambiguation' in l_category) and
('articles_with_links_needing_disambiguation' not in l_category)):
disambiguation_pages.add(int(page_id))
with open(out_json, 'w') as f:
json.dump(list(disambiguation_pages), f)
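# Note: no `if __name__ == '__main__': categorylinks_cli()` guard is present,
# so the click group is presumably wired up elsewhere. Illustrative
# invocations (file names are hypothetical):
#   categorylinks_cli clean categorylinks.csv categories.jsonl
#   categorylinks_cli disambiguate categorylinks.csv disambiguation_pages.json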
|
[
"ski.rodriguez@gmail.com"
] |
ski.rodriguez@gmail.com
|
a6ed5a8a5d3336d2bf6c6ba243318cbfca4972e6
|
2e6b7c3a43c21d15f70a4f9f66180193f2b08113
|
/LearnFixture/test_04.py
|
f0cbc229ad689caf0da9a867993f9a851b916115
|
[] |
no_license
|
15619592320/ApiAutoTest
|
9cbf858774419fb332e2a2d8af2671b980537425
|
75c0fa6b63902f6cb18de1bfc458facdb0f3ce5a
|
refs/heads/master
| 2022-12-18T23:36:20.578922
| 2020-09-30T07:16:10
| 2020-09-30T07:16:10
| 299,828,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
'''
fixture with a return value
'''
import pytest
@pytest.fixture()
def data():
return {"username":"root","pwd":"123456"}
def test_case01(data):
print(f"{data}")
print(f"{data['username']}")
print(f"{data['pwd']}")
|
[
"623060480@qq.com"
] |
623060480@qq.com
|
322c9af3e783d219ef22bc3a5d7b31019499f926
|
b85b323f12ddca5678d7cf33e2cc4ec53693f893
|
/tests/conftest.py
|
ee4bd4f6cae00981757bd29bbb9e763912597417
|
[
"MIT"
] |
permissive
|
rapydo/do
|
d3168616ac2c7bfbe12ed34c5f932d0c567c79b4
|
6c23d7f7a747305e2acb37ac6e17686169b95dbf
|
refs/heads/3.0
| 2023-08-30T19:49:01.383844
| 2023-08-18T15:53:46
| 2023-08-18T15:53:46
| 91,668,796
| 12
| 12
|
MIT
| 2023-09-07T23:17:37
| 2017-05-18T08:31:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,057
|
py
|
import os
import pytest
from faker import Faker
# This fixture is executed before every test to create a new random folder,
# so that each test runs in an isolated environment
@pytest.fixture(autouse=True)
def create_folder() -> None:
    # If you are already in a test folder
    # (this is the case starting from the second test),
    # go back to the main folder
prefix = "0."
suffix = ".random.test"
    # This is not executed on GitHub Actions because all tests are executed in parallel
if os.getcwd().endswith(suffix): # pragma: no cover
os.chdir("..")
# Create a new folder with a random name
# Call to untyped function "Faker" in typed context
f = Faker("en_US")
folder = f"{prefix}{f.pystr(min_chars=12, max_chars=12)}{suffix}"
os.makedirs(f"{folder}/data/logs")
os.chdir(folder)
print(f"FOLDER = {folder}")
# Beware, this replaces the standard faker fixture provided by Faker itself
@pytest.fixture
def faker() -> Faker:
# Call to untyped function "Faker" in typed context
return Faker("en_US")
|
[
"noreply@github.com"
] |
rapydo.noreply@github.com
|
2cbb847c7b2c9cdd4feb96d32b116a46c2e068d2
|
1392a2180bf274fdf9ca05888162b19ce67407d5
|
/app/tosca/model/template.py
|
ad243fec65e8c32616304802d994272e967c4022
|
[] |
no_license
|
di-unipi-socc/toskose-packager
|
3ab170f20a02baceda276abef5346f7d43f1e476
|
4fda1baec7dd13f6c4372bf54e65de401f5fd408
|
refs/heads/master
| 2023-01-08T01:15:48.937136
| 2020-04-16T08:46:51
| 2020-04-16T08:46:51
| 166,801,812
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,859
|
py
|
'''
Template module
'''
import six
import os
from .nodes import Container, Root, Software, Volume
class Template:
def __init__(self, name):
self._nodes = {}
self.name = name
self.description = 'No description.'
self._outputs = []
self.tmp_dir = None
self.manifest_path = None
self.imports = []
self.toskose_config_path = None
def add_import(self, name, path):
if not os.path.exists(path):
            raise ValueError('The file {} doesn\'t exist'.format(path))
entry = dict()
entry[name] = path
self.imports.append(entry)
@property
def nodes(self):
return (v for k, v in self._nodes.items())
@property
def containers(self):
""" The container nodes associated with the template.
Returns a generator expression.
"""
return (v for k, v in self._nodes.items() if isinstance(v, Container))
@property
def volumes(self):
""" The volume nodes associated with the template.
Returns a generator expression.
"""
return (v for k, v in self._nodes.items() if isinstance(v, Volume))
@property
def software(self):
""" The software nodes associated with the template.
Returns a generator expression.
"""
return (v for k, v in self._nodes.items() if isinstance(v, Software))
def push(self, node):
self._nodes[node.name] = node
def __getitem__(self, name):
return self._nodes.get(name, None)
def __contains__(self, item):
if isinstance(item, six.string_types):
return self[item] is not None
if isinstance(item, Root):
return self[item.name] is not None
return False
def __str__(self):
return ', '.join((i.name for i in self.nodes))
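# Minimal usage sketch (node classes come from .nodes; constructor arguments
# are illustrative):
#   t = Template('my-app')
#   t.push(Container('web'))
#   assert 'web' in t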
|
[
"matteo.bogo@gmail.com"
] |
matteo.bogo@gmail.com
|
66011f93ef5d97fdf940d9b1412fc1b73b26d96f
|
b7ffe6c5269ebe4e539860aa6e6bbc682337b10d
|
/20190304/TCT_20190304_DongGeunLee.py
|
2aa51f5940a6f8bf35c955cb8a24d92640139e61
|
[] |
no_license
|
hrkim28/TCT
|
eaa9f8b54f743db5c271d6fd8dbb1ed9d867a9a7
|
b749a8b656a54f3470fa63e2c2e3854fca570720
|
refs/heads/master
| 2020-04-15T04:49:58.922737
| 2019-12-06T05:56:23
| 2019-12-06T05:56:23
| 164,397,901
| 9
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
def is_pal(p_char):
return p_char == p_char[::-1]
p_input = input("문자 입력하세요 :")
if is_pal(p_input):
print('예제결과 :',0)
else:
# 회문체크 및 만들기
print('추가로직은 좀더 학습 후 도전해도록 하겠습니다.^^........')
|
[
"yeloxen@naver.com"
] |
yeloxen@naver.com
|
992015df1a02e13ed26179b6b03e4a7a89c1dfcd
|
b872ceed9575f88ddc2fd483598bda1f80a13507
|
/constants.py
|
669a14e1c99df153542398bd95fddbb82a206ab9
|
[] |
no_license
|
betohurtado3/Pokemon
|
4c734967a585aa74a95e70aad7dcc4d98d95c586
|
4cbad94ff5d4791c81b9787d58407d799d5adeed
|
refs/heads/master
| 2023-01-05T13:45:00.024837
| 2020-11-11T03:22:12
| 2020-11-11T03:22:12
| 311,851,129
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
# Pokemon status constants
HP = "HP"
ATTACK = "Attack"
DEFENSE = "Defense"
SPATTACK = "SpAttack"
SPDEFENSE = "SpDefense"
SPEED = "Speed"
PHYSICAL = "physical"
SPECIAL = "special"
# Command constants
DO_ATTACK = "attack"
DO_ATTACK_SELECTION = "selected_attack"
|
[
"betohurtado3@hotmail.com"
] |
betohurtado3@hotmail.com
|
2b5fc5daf0ba6566ee675951754d379f94225f25
|
c69451542ef9ffb6e3f12bcd99c3ea2fe84073ee
|
/SeedCast/Masters/migrations/0016_auto_20180923_1953.py
|
5692b6dbf6ea0379e14d0dde59f77d7d3a7dc12b
|
[] |
no_license
|
subhashpracs/IRRI-SEEDCAST
|
50297cd51d0d8f862627c18f1be9186d2e20203f
|
771cf9fb1db1fcf437221986e1db31e6c620c40c
|
refs/heads/master
| 2020-03-29T14:44:20.439099
| 2018-09-23T22:51:09
| 2018-09-23T22:51:09
| 150,031,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-09-23 19:53
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Masters', '0015_auto_20180923_1952'),
]
operations = [
migrations.AlterField(
model_name='dealer_registration',
name='date',
field=models.DateTimeField(default=datetime.datetime(2018, 9, 23, 19, 53, 35, 115186)),
),
]
|
[
"sai@pracs.in"
] |
sai@pracs.in
|
665dcc52eba524df257caff6e50e0b2f063ee789
|
ae65873c3584cef7139066b224daad04410af6d2
|
/Top10Words.py
|
b6f4ffe80cc71c2040700a1f7c86913682066030
|
[] |
no_license
|
rajatkashyap/Python
|
2240c7472d07803c460c7a55d570e20694b694f9
|
f74c85c65b0e209a5f7ab25b653d42835222faaf
|
refs/heads/master
| 2022-06-25T19:20:52.847498
| 2022-06-08T14:40:45
| 2022-06-08T14:40:45
| 145,714,257
| 0
| 0
| null | 2022-04-25T00:18:37
| 2018-08-22T13:39:14
|
Python
|
UTF-8
|
Python
| false
| false
| 558
|
py
|
f=open('UHC.txt')
dict={}
words=f.read().split()
for word in words:
    w=word.lower()
    dict[w]=dict.get(w,0)+1
#print dict
str_tups=[]
for k,v in dict.items():
    str_tups.append((v,k))
#print str_tups
str_tups.sort(reverse=True)
print str_tups[:10]
keys=dict.keys()
values=dict.values()
#print keys
#print values
values.sort(reverse=True)
for i in range(10):
    for key in keys:
        if dict[key]==values[i]:
            print key,values[i]
'''
for i in range(10):
    for d in dict:
        if d[keys[i]]==values[i]:
            print d '''
|
[
"rajatkashyap@Rajats-MBP.T-mobile.com"
] |
rajatkashyap@Rajats-MBP.T-mobile.com
|
9ad9f1f73f94769307e72df7e57956a71565790a
|
e741d661b1cbb1d48eff4adff3ce8d424b0b3aee
|
/meiduo_mall/apps/payment/apps.py
|
d314f1fe58cfc99b4a93ba784b8eadf87541370a
|
[
"MIT"
] |
permissive
|
Tao-bug/meiduo_project
|
09b3900ab7e1436ee3201c53461a9c119d1f56db
|
e3a24eac2c8231d0e27f6a7fa639dd36baa410b0
|
refs/heads/master
| 2022-07-16T02:18:22.319641
| 2019-11-12T13:54:23
| 2019-11-12T13:54:23
| 218,697,808
| 0
| 0
|
MIT
| 2022-07-01T00:55:01
| 2019-10-31T06:20:01
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 94
|
py
|
from django.apps import AppConfig
class PaymentConfig(AppConfig):
    name = 'apps.payment'
|
[
"897384405@qq.com"
] |
897384405@qq.com
|
2cfdb3060d28b139002d5a66203d6e2f8c2a6048
|
be03e9eb173c04fea77a4a1eada01b1b74ef6b0d
|
/langevin/__init__.py
|
b3e5aef49577bc993b223f2eb404c5845c4fc881
|
[
"MIT"
] |
permissive
|
raickhr/langevin
|
b3ac6e76180114795bec6b3494b98dad202d89d3
|
3a197740404f4986aa9160e6a0a381861c65f22b
|
refs/heads/master
| 2020-03-30T11:05:38.124589
| 2018-10-03T19:52:03
| 2018-10-03T19:52:03
| 151,152,923
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
# -*- coding: utf-8 -*-
"""Top-level package for Langevin."""
__author__ = """Shikhar"""
__email__ = 'shikhar.rai@rochester.edu'
__version__ = '0.1.0'
|
[
"shikhar.rai@rochester.edu"
] |
shikhar.rai@rochester.edu
|
c4623c36205ab3d912a96785be33eb4148c0fbd8
|
30602837319e2bff13fcf12f857071f375fcb56d
|
/Ironnano/admin.py
|
d7d646663afe81e855dd83038ecd1e997dc2b8ee
|
[] |
no_license
|
Aaryan-Dambe/Hygenie
|
f9985cf86768c6151b23d775890fd1f6e9c56a95
|
73416a5812b478b847448a31e18798b16de03ca4
|
refs/heads/main
| 2023-04-13T00:09:25.043578
| 2021-01-09T18:28:53
| 2021-01-09T18:28:53
| 328,220,671
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
from django.contrib import admin
from .models import profit
# Register your models here.
admin.site.register(profit)
|
[
"76246356+Aaryan-Dambe@users.noreply.github.com"
] |
76246356+Aaryan-Dambe@users.noreply.github.com
|
7277ac2eaabcf7b897836bbb6151f5572887f2ed
|
e00f7d486559fa465d1804e45ea9f7f0a589c675
|
/CLI-Assign2.py
|
b67b44ca59f8d7fc45ee118a0ae9a5473270fcba
|
[] |
no_license
|
dhairyaj99/Command-Line-Interface
|
1ebd036adb21289fefa068026a077ebd85108d5d
|
378d436dfc2e1e544e602f74b88622dda0b5f0a5
|
refs/heads/master
| 2022-09-05T17:10:26.823650
| 2020-05-20T20:19:38
| 2020-05-20T20:19:38
| 265,677,160
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,827
|
py
|
from cmd import Cmd
import os
import re  # needed by do_search
import sys
import fnmatch


class MyCLI(Cmd):
    prompt = ">> "

    def do_exit(self, args):
        """
        Exits the system
        """
        raise SystemExit()

    def do_pwd(self, args):
        """
        Prints current working directory of the shell
        """
        currentdirectory = os.getcwd()
        print(currentdirectory)

    def do_ls(self, arg):
        """
        Prints all files and folders in the working directory
        """
        filesinfolder = os.listdir(os.curdir)
        print(*filesinfolder, sep=" ")

    def do_cd(self, arg):
        """
        Changes the current working directory to the one given by the user
        :param arg: Input given by the user - represents the new working directory
        :return:
        """
        if not os.path.isdir(arg):
            print("Directory doesn't exist")
        else:
            os.chdir(arg)

    def do_find(self, arg):
        """
        Prints the paths of files with the given name under the working directory
        :param arg: Name of the file given by the user
        :return:
        """
        filess = []
        # Search only when a filename was actually supplied.
        if arg != "":
            for root, dirs, files in os.walk(os.curdir):
                if arg in files:
                    filess.append(os.path.join(root, arg))
            print(*filess, sep=" ")
        else:
            print("Write a filename")

    def do_search(self, arg):
        """
        Locates files with matching content
        :param arg: User-inputted text to locate
        :return:
        """
        for root, dirs, files in os.walk(os.curdir):
            filess = [f for f in files if os.path.isfile(root + '\\' + f)]
            for file in filess:
                file = root + '\\' + file
                fileo = open(file)
                for line in fileo:
                    line = line.lower()
                    if re.search(arg, line):
                        print("Text given: " + arg + " was found in " + file)
                fileo.close()

    def do_head(self, arg):
        """
        Allows user to preview file content - max 5 lines
        :param arg: Name of file
        :return:
        """
        i = 0
        file = os.curdir + "\\" + arg
        filer = open(file)
        for l in filer:
            print(l, end='')
            i += 1
            if i == 5:
                break
        filer.close()

    def do_run(self, arg):
        """
        Allows user to run scripts
        :param arg: File name of the script
        :return:
        """
        try:
            os.system(arg + ".scriptscript")
        except:
            print("An error has occurred")

    def do_ln(self, arg):
        """
        Displays names of note files
        :param arg: name of note files
        :return:
        """
        filess = fnmatch.filter(os.listdir(os.curdir), "*.notenote")
        print(*filess, sep=" ")

    def do_tn(self, arg):
        """
        Allows user to write text to a note file; expects "<name> <text>"
        (cmd.Cmd passes a single argument string, so name and text are
        split out of arg here)
        :param arg: File name followed by the text to write
        :return:
        """
        name, _, text = arg.partition(' ')
        if name != "" and text != "":
            fo = open(name + '.notenote', 'w+')
            fo.write(text)
            fo.close()
        else:
            print("Please add content for file or write name of file")

    def do_vn(self, arg):
        """
        Displays the content of the note
        :param arg: File name of the note
        :return:
        """
        if arg != "":
            fo = open(arg + '.notenote')
            for line in fo:
                print(line)
            fo.close()
        else:
            print("Please add a file name")


if __name__ == "__main__":
    app = MyCLI()
    app.cmdloop('Enter a command to do something: ')
|
[
"32658775+dhairyaj99@users.noreply.github.com"
] |
32658775+dhairyaj99@users.noreply.github.com
|
851a8b37f8cd9c9d414e0f78a1e8279946ad3262
|
fc424e605e7e969b65ab4a0100f6031ad8f8851c
|
/PixelDP_test_experiment/flags.py
|
da08ea3e3d97c5c042705fb13b23f5c9e6503bb6
|
[] |
no_license
|
XintongHao/Robust-CNN-with-Differential-Privacy
|
42a346dfc105af45297df7c380b5b13e81ed9402
|
cbf0ca34ca8c6589f7829af47bb50658d794fb89
|
refs/heads/master
| 2021-10-08T18:33:44.772149
| 2018-12-15T23:10:24
| 2018-12-15T23:10:24
| 160,233,609
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 774
|
py
|
import os
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('dataset', 'cifar10', 'mnist, cifar10 or cifar100.')
tf.app.flags.DEFINE_string('mode', 'train', 'train or eval.')
tf.app.flags.DEFINE_string('data_path', os.path.join(os.path.expanduser("~"), 'datasets'),
                           'Data dir.')
tf.app.flags.DEFINE_string('models_dir', 'trained_models',
                           'Directory to keep the checkpoints. Should be a '
                           'parent directory of FLAGS.model_dir.')
tf.app.flags.DEFINE_integer('num_gpus', 1,
                            'Number of gpus used for training.')
tf.app.flags.DEFINE_integer('min_gpu_number', 0,
                            'Number of gpus used for training. (0 or 1)')
|
[
"hxtong@bu.edu"
] |
hxtong@bu.edu
|
03616431f07baf1f3dbe3f0c765987f73126cd8f
|
b42785b660c9e62b10e1ee768076dd93deb97c7f
|
/tests/test_perms.py
|
53cf2cee277a234741d7fdd8d67941311f7989a5
|
[] |
no_license
|
aphymi/pondbot-v2
|
c50c5ca801734876f059294a7d611b28d01fe0b5
|
694f25d0b1ee8b749d3e24dd314b2b81aa8e2051
|
refs/heads/master
| 2023-04-09T22:16:24.816544
| 2020-07-23T22:10:31
| 2020-07-23T22:10:31
| 263,471,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,624
|
py
|
from unittest import TestCase

import yaml

import permissions
from permissions import PermNode


class TestPermTrie(TestCase):

    def test_trie_simple_eq(self):
        self.assertTrue(PermNode(root=True) == PermNode(root=True))
        self.assertTrue(
            PermNode("a", tvalue=False) == PermNode("a", tvalue=False)
        )
        self.assertFalse(
            PermNode("a", tvalue=True) == PermNode("a", tvalue=False)
        )

    def test_add_perm_to_empty_trie(self):
        added_trie = PermNode.new()
        added_trie.add_perm("a")
        self.assertEqual(
            added_trie,
            PermNode(
                root=True,
                children={
                    "a": PermNode("a", tvalue=True),
                },
            ),
        )

        added_trie = PermNode.new()
        added_trie.add_perm("a.b.c")
        manual_trie = PermNode(
            root=True,
            children={
                "a": PermNode(
                    "a",
                    children={
                        "b": PermNode(
                            "b",
                            children={
                                "c": PermNode("c", tvalue=True)
                            },
                        ),
                    },
                ),
            },
        )
        self.assertEqual(added_trie, manual_trie)

    def test_add_perm_to_nonempty_trie(self):
        added_trie = PermNode(root=True, children={"a": PermNode("a")})
        added_trie.add_perm("a.b")
        manual_trie = PermNode(
            root=True,
            children={
                "a": PermNode(
                    "a",
                    children={
                        "b": PermNode("b", tvalue=True)
                    },
                ),
            },
        )
        self.assertEqual(added_trie, manual_trie)

        added_trie = PermNode(root=True, children={"a": PermNode("a")})
        added_trie.add_perm("c.d")
        manual_trie = PermNode(
            root=True,
            children={
                "a": PermNode("a"),
                "c": PermNode(
                    "c",
                    children={
                        "d": PermNode("d", tvalue=True)
                    },
                ),
            },
        )
        self.assertEqual(added_trie, manual_trie)

    def test_add_negative_perm(self):
        added_trie = PermNode.new()
        added_trie.add_perm("-a")
        self.assertEqual(
            added_trie,
            PermNode(
                root=True,
                children={"a": PermNode("a", tvalue=False)},
            ),
        )

        added_trie = PermNode.new()
        added_trie.add_perm("-a.b.c")
        manual_trie = PermNode(
            root=True,
            children={
                "a": PermNode(
                    "a",
                    children={
                        "b": PermNode(
                            "b",
                            children={
                                "c": PermNode("c", tvalue=False)
                            },
                        ),
                    },
                ),
            },
        )
        self.assertEqual(added_trie, manual_trie)

    def test_nonconflicting_basic_merge(self):
        perms1 = [
            "a.b.c",
            "b.q.l",
            "a.l.x",
            "a.b.j",
        ]
        perms2 = [
            "a.d.q",
            "a.b.d",
            "b.j.q",
            "c.a.b",
        ]

        trie1 = PermNode.new()
        trie2 = PermNode.new()
        triec = PermNode.new()
        for perm in perms1:
            trie1.add_perm(perm)
            triec.add_perm(perm)
        for perm in perms2:
            trie2.add_perm(perm)
            triec.add_perm(perm)

        trie1.merge(trie2)
        self.assertEqual(trie1, triec)

    def test_merge_higher_group_pri_into_wildcard(self):
        lower = PermNode(root=True, group_lvl=1)
        lower.add_perm("a.*", group_lvl=1)
        higher = permissions.PermNode(root=True)
        higher.add_perm("a.b")

        lower.merge(higher)
        self.assertTrue(lower.has_perm("a.absent"))


class TestGroupPermissions(TestCase):

    def assertGHP(self, group, perm):
        self.assertTrue(
            permissions.group_has_perm(group, perm),
            msg=f"Group '{group}' does not have perm '{perm}', but should.",
        )

    def assertNotGHP(self, group, perm):
        self.assertFalse(
            permissions.group_has_perm(group, perm),
            msg=f"Group '{group}' has perm '{perm}', but should not."
        )

    @classmethod
    def setUpClass(cls):
        permissions.construct_perm_tries(
            yaml.load(fixture, Loader=yaml.FullLoader)
        )

    def test_root_wildcard(self):
        self.assertGHP("dev", "absent.nope.nada")
        self.assertNotGHP("muted", "absent.nope.nada")

    def test_nested_wildcard(self):
        self.assertGHP("default", "cmd.absent")
        self.assertNotGHP("nocmds", "cmd.absent")

    def test_absent_perm(self):
        self.assertNotGHP("default", "absent.nope.nada")

    def test_atomic_perm(self):
        self.assertGHP("default", "fred")
        self.assertNotGHP("default", "thud")

    def test_nested_perm(self):
        self.assertGHP("default", "baz.bork")
        self.assertNotGHP("default", "foo.bar")

    def test_specific_perm_overrides_wildcard(self):
        self.assertGHP("default", "regex.trigger")
        self.assertNotGHP("default", "cmd.kick")

    def test_specific_perm_overrides_multiple_wildcards(self):
        self.assertGHP("default", "a.b.c")
        self.assertNotGHP("default", "a.b.d.e")
        self.assertGHP("default", "f.g.h")
        self.assertNotGHP("default", "i.j.k")

    def test_group_perm_overrides_inherited_perm(self):
        self.assertGHP("mod", "foo.bar")
        self.assertNotGHP("mod", "regex.trigger")

    def test_inherited_wildcard_with_nonconflicting_group_perm(self):
        self.assertGHP("mod", "l.absent")

    def test_group_perm_overrides_inherited_wildcard(self):
        self.assertGHP("mod", "f.b")
        self.assertNotGHP("mod", "a.c")

    def test_group_wildcard_overrides_inherited_perm(self):
        self.assertGHP("mod", "foo.absent")
        self.assertNotGHP("mod", "baz.absent")
# yaml-formatted permission config.
fixture = """
groups:
  default:
    perms:
      - cmd.*
      - -cmd.shutdown
      - -cmd.restart
      - -cmd.kick
      - -cmd.ban
      - regex.trigger
      - -regex.*
      - fred
      - -thud
      - baz.bork
      - -foo.bar
      - a.*
      - -a.b.*
      - a.b.c
      - a.b.d.*
      - -a.b.d.e
      - -f.*
      - -f.g.*
      - f.g.h
      - i.*
      - i.j.*
      - -i.j.k
      - l.*
  mod:
    inherit:
      - default
    perms:
      - -regex.trigger
      - cmd.kick
      - cmd.ban
      - cmd.echo
      - foo.bar
      - foo.*
      - -baz.*
      - -a.c
      - f.b
      - l.a.b
  admin:
    inherit:
      - mod
    perms:
      - -regex.trigger
      - '*'
  dev:
    perms:
      - '*'
  muted:
    perms:
      - '-*'
  nocmds:
    perms:
      - -cmd.*
"""
|
[
"aphymi@gmail.com"
] |
aphymi@gmail.com
|
037c0297e6528cdbf68ecb8b3295c9ce74f0598e
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/143/usersdata/126/62651/submittedfiles/av2_p3_m2.py
|
7d22e7aa5f8b510c2f6ca3d97182ffc3fc5c67bd
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 653
|
py
|
# -*- coding: utf-8 -*-
def listadegraus(a):
    b = []
    for i in range(0, len(a)-1, 1):
        if a[i] > a[i+1]:
            cont = 0
            for j in range(a[i], a[i+1], -1):  # j avoids shadowing the outer index i
                cont = cont + 1
            b.insert(0, cont)
        elif a[i] < a[i+1]:
            cont = 0
            for j in range(a[i], a[i+1], 1):
                cont = cont + 1
            b.insert(0, cont)
        elif a[i] == a[i+1]:
            cont = 0
            b.insert(0, cont)
    return(b)

n = int(input('enter the number of items in the list: '))
a = []
for i in range(0, n, 1):
    m = int(input('enter a value: '))
    a.append(m)
print(listadegraus(a))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
414880acdba06fd961eab866c64febe2bd29e1a4
|
f29460914100b8e89efc746a9a4f4acc15533dc3
|
/clipboard_attendees.py
|
807b347a9cb3ae3e893921b851f17d3d13b83129
|
[] |
no_license
|
sczeck/clipboard-formatting-note-aid
|
8b289840236001af2c1461caead0a70fb8b3e363
|
219f01077a87c21fdfcf56567b0bd67ffa0787a9
|
refs/heads/master
| 2021-03-12T22:35:46.704935
| 2013-12-06T01:22:27
| 2013-12-06T01:22:27
| 14,970,200
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,452
|
py
|
print 'hello attendee script is running'
import re
from Tkinter import Tk

r = Tk()
#r.withdraw()
txt = r.clipboard_get()
#for c in txt:
#    print c
#r.destroy()
#r.clipboard_clear()
#r.clipboard_append('i can has clipboardz?')
#r.destroy()
#txt = """Mark A. Hubbard; Chad A. Sclavi <casclavi@spscommerce.com>; Anthony Sarumi <asarumi@spscommerce.com>; Laura L. Johnson <lljohnson@spscommerce.com>; Elaina Perleberg <eperleberg@spscommerce.com>; John P. Myers <jpmyers@spscommerce.com>; Paul M. Jorde <pmjorde@spscommerce.com>; Nathaniel R. Andersen <nrandersen@spscommerce.com>; Sean Curran <scurran@spscommerce.com>; Ashlie Olslund <aolslund@spscommerce.com>; John Caneday <jcaneday@spscommerce.com>; Katie A. Fisher <kafisher@spscommerce.com>; Shannon Schulte <sschulte@spscommerce.com>; Tyler G. Berndt <tgberndt@spscommerce.com>"""
txt = re.sub('<.*?>','',txt)
txt = txt.replace(' ;',';')
txt = txt.replace('; ',';')
txt = txt.strip()
output = "Attendees\n"
for name in txt.split(';'):
    if name == "Steve Czeck":
        continue
    if name.count(' ') == 2:
        first,middle,last = name.split(' ')
        if middle != 'St.':
            name = first + " " + last
    elif name.count(' ') == 3:
        first,middle,lastA,lastB = name.split(' ')
        name = first + " " + lastA + " " + lastB
    output += "*" + name + "\n"
output += "*Steve Czeck <note taker>\n"
output += "\nNotes\n*"
#print output
r.clipboard_append(output)
r.destroy()
|
[
"sczeckgithub@gmail.com"
] |
sczeckgithub@gmail.com
|
8d27f573730b3ac854c292e5e16ce86dda2a5b4b
|
20aeba6e9fd1add068b63d94d160343a65f539be
|
/GenomeComparison.spec
|
43821ea00491bb94fa80adb356b6291f8a29371f
|
[
"MIT"
] |
permissive
|
kbaseapps/GenomeProteomeComparison
|
163ab32830eb3628321201acf4c451b330a2b720
|
f96f62941820a793124a18fe6e655da33a562033
|
refs/heads/master
| 2022-06-01T12:48:01.978041
| 2020-05-01T20:50:27
| 2020-05-01T20:50:27
| 54,588,323
| 0
| 4
|
MIT
| 2020-05-01T20:50:29
| 2016-03-23T19:44:00
|
Java
|
UTF-8
|
Python
| false
| false
| 2,509
|
spec
|
module GenomeComparison {

    /*
        A workspace ID that references a Genome data object.
        @id ws KBaseGenomes.Genome KBaseGenomeAnnotations.GenomeAnnotation
    */
    typedef string ws_genome_id;

    /*
        int inner_pos - position of gene name in inner genome (see dataN field in ProteomeComparison)
        int score - bit score of blast alignment multiplied by 100
        int percent_of_best_score - best bit score of all hits connected to either of two genes from this hit
    */
    typedef tuple<int inner_pos, int score, int percent_of_best_score> hit;

    /*
        string genome1ws - workspace of genome1 (deprecated, use genome1ref instead)
        string genome1id - id of genome1 (deprecated, use genome1ref instead)
        ws_genome_id genome1ref - reference to genome1
        string genome2ws - workspace of genome2 (deprecated, use genome2ref instead)
        string genome2id - id of genome2 (deprecated, use genome2ref instead)
        ws_genome_id genome2ref - reference to genome2
        float sub_bbh_percent - optional parameter, minimum percent of bit score compared to best bit score, default is 90
        string max_evalue - optional parameter, maximum evalue, default is 1e-10
        list<string> proteome1names - names of genes of genome1
        mapping<string, int> proteome1map - map from genes of genome1 to their positions
        list<string> proteome2names - names of genes of genome2
        mapping<string, int> proteome2map - map from genes of genome2 to their positions
        list<list<hit>> data1 - outer list iterates over positions of genome1 gene names, inner list iterates over hits from given gene1 to genome2
        list<list<hit>> data2 - outer list iterates over positions of genome2 gene names, inner list iterates over hits from given gene2 to genome1
        @optional genome1ws
        @optional genome1id
        @optional genome1ref
        @optional genome2ws
        @optional genome2id
        @optional genome2ref
    */
    typedef structure {
        string genome1ws;
        string genome1id;
        ws_genome_id genome1ref;
        string genome2ws;
        string genome2id;
        ws_genome_id genome2ref;
        float sub_bbh_percent;
        string max_evalue;
        list<string> proteome1names;
        mapping<string, int> proteome1map;
        list<string> proteome2names;
        mapping<string, int> proteome2map;
        list<list<hit>> data1;
        list<list<hit>> data2;
    } ProteomeComparison;
};
|
[
"rsutormin@lbl.gov"
] |
rsutormin@lbl.gov
|
b60b31e90f5028990f00bc5dfc1af0a8a4428248
|
67d7397c64435ee7df782c3091709672cb6bcd1b
|
/dataFabricScripts/iRODS/usageScripts/SingleZone/usageFromICAT.py
|
ff39bdbd96d8fb08a7f4224868ded6404931ebeb
|
[] |
no_license
|
nesi/ARCS-systems
|
6984c88c9d1f7d966a79fc0f4c917b66e9d49396
|
79d41318e56730293435abe4ac4310e771f8e474
|
refs/heads/master
| 2020-05-17T15:48:32.048320
| 2012-12-08T07:03:09
| 2012-12-08T07:03:09
| 41,837,318
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,107
|
py
|
#!/usr/bin/python
import pgdb, sys
import os
import xml.dom.minidom

# ------------------------------------------------------------
# Assuming:
#  - the DB name is ICAT
#  - the owner of ICAT is rods
# ------------------------------------------------------------

class StatsExporter:

    def __init__(self, _zone):
        self.pgAuth = os.popen("cat ~/.pgpass").readlines()[0].split(':')
        self.dbConn = pgdb.connect(host=self.pgAuth[0] + ':' + self.pgAuth[1], user=self.pgAuth[3], database=self.pgAuth[2], password=self.pgAuth[4][:-1])
        self.doc = xml.dom.minidom.Document()
        self.root = self.doc.createElement('usages')
        self.doc.appendChild(self.root)
        self.zone = _zone
        zoneNode = self.createChild(self.root, 'zone', self.zone)
        self.cursor = None

    def doQuery(self, query):
        newCursor = self.dbConn.cursor()
        newCursor.execute(query)
        return newCursor

    def createChild(self, parent, childName, childValue = None):
        child = self.doc.createElement(childName)
        if(childValue <> None):
            childTxt = None
            try:
                childTxt = self.doc.createTextNode(childValue)
            # possibly dodgy...
            except TypeError:
                childTxt = self.doc.createTextNode(repr(childValue))
            child.appendChild(childTxt)
        parent.appendChild(child)
        return child

    def close(self):
        self.dbConn.close()

    # list name is assumed to be a plural...
    def processResults(self, listName, rows):
        elementList = self.createChild(self.root, listName)
        lastUser = None
        resourcesList = None
        for row in rows:
            curUser = row[0] + "@" + row[1]
            if(curUser <> lastUser):
                element = self.createChild(elementList, listName[:-1])
                self.createChild(element, 'name', row[0])
                self.createChild(element, 'zone', row[1])
                resourceList = self.createChild(element, 'resources')
                lastUser = curUser
            resource = self.createChild(resourceList, 'resource')
            self.createChild(resource, 'id', row[2])
            self.createChild(resource, 'amount', row[3])
            self.createChild(resource, 'count', repr(row[4])[:-1])
        curUser = element

    def addRecordToDoc(self, query, listName):
        cur = self.doQuery(query)
        results = cur.fetchall()
        cur.close()
        self.processResults(listName, results)

    # ----------------------------------------------------------------------------------
    # Assuming that we still follow the convention we have used in SRB:
    #  - associating the amount used/number of files with a group-based projects folder
    # ----------------------------------------------------------------------------------
    def work(self):
        query = """SELECT dataTable.data_owner_name, dataTable.data_owner_zone,
                          dataTable.resc_name,
                          SUM(dataTable.data_size), COUNT(dataTable.data_id)
                   FROM
                       (SELECT object_id FROM r_objt_access WHERE access_type_id = 1200
                        EXCEPT
                        (SELECT object_id FROM r_objt_access WHERE user_id IN
                            (SELECT user_id FROM r_user_main WHERE user_type_name = 'rodsgroup'))) AS accessTable
                   INNER JOIN
                       (SELECT data_id, data_size, data_owner_name, data_owner_zone, resc_name FROM r_data_main ) AS dataTable
                   ON
                       accessTable.object_id = dataTable.data_id
                   GROUP BY dataTable.data_owner_name, dataTable.data_owner_zone, dataTable.resc_name
                   ORDER BY dataTable.data_owner_name, dataTable.data_owner_zone, dataTable.resc_name"""
        self.addRecordToDoc(query, 'users')

        query = """SELECT userTable.user_name, dataTable.data_owner_zone,
                          dataTable.resc_name,
                          SUM(dataTable.data_size), COUNT(dataTable.data_id)
                   FROM
                       r_user_main as userTable,
                       r_objt_access as accessTable,
                       r_data_main as dataTable
                   WHERE
                       userTable.user_type_name = 'rodsgroup' AND
                       userTable.user_id = accessTable.user_id AND
                       accessTable.object_id = dataTable.data_id
                   GROUP BY userTable.user_name, dataTable.data_owner_zone, dataTable.resc_name
                   ORDER BY userTable.user_name, dataTable.data_owner_zone, dataTable.resc_name"""
        self.addRecordToDoc(query, 'projects')

    def prettyPrint(self):
        print self.doc.toprettyxml(encoding='utf-8')


if(__name__ == "__main__"):
    #getResources()
    lines = os.popen("cat ~/.irods/.irodsEnv|grep 'irodsZone'").readlines()
    if(len(lines) == 1):
        zone = lines[0].split("'")[1]
        exporter = StatsExporter(zone)
        exporter.work()
        exporter.prettyPrint()
        exporter.close()
    #getProjectUsage()
|
[
"kai.lu@arcs.org.au"
] |
kai.lu@arcs.org.au
|
2d26d02539e1e4894e20095347999b7d5f48a0bd
|
bbe96b7552494b6baf4e84d6cd84fe9cb8192eb8
|
/src/data/data_class.py
|
074a6af2b213eb5e5033db83c39726fa314db432
|
[
"MIT"
] |
permissive
|
FFFinale/DeepFeatureIV
|
889fe63a5b38f92474cbf7845c9a2a920406dcdd
|
54b04e9e9e4c88d4859ea65d34ceb69dd1b58bc2
|
refs/heads/master
| 2022-12-29T16:24:50.134938
| 2020-10-17T06:26:55
| 2020-10-17T06:26:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,588
|
py
|
from typing import NamedTuple, Optional
import numpy as np
import torch
class TrainDataSet(NamedTuple):
    treatment: np.ndarray
    instrumental: np.ndarray
    covariate: Optional[np.ndarray]
    outcome: np.ndarray
    structural: np.ndarray


class TestDataSet(NamedTuple):
    treatment: np.ndarray
    covariate: Optional[np.ndarray]
    structural: np.ndarray


class TrainDataSetTorch(NamedTuple):
    treatment: torch.Tensor
    instrumental: torch.Tensor
    covariate: torch.Tensor
    outcome: torch.Tensor
    structural: torch.Tensor

    @classmethod
    def from_numpy(cls, train_data: TrainDataSet):
        covariate = None
        if train_data.covariate is not None:
            covariate = torch.tensor(train_data.covariate, dtype=torch.float32)
        return TrainDataSetTorch(treatment=torch.tensor(train_data.treatment, dtype=torch.float32),
                                 instrumental=torch.tensor(train_data.instrumental, dtype=torch.float32),
                                 covariate=covariate,
                                 outcome=torch.tensor(train_data.outcome, dtype=torch.float32),
                                 structural=torch.tensor(train_data.structural, dtype=torch.float32))

    def to_gpu(self):
        covariate = None
        if self.covariate is not None:
            covariate = self.covariate.cuda()
        return TrainDataSetTorch(treatment=self.treatment.cuda(),
                                 instrumental=self.instrumental.cuda(),
                                 covariate=covariate,
                                 outcome=self.outcome.cuda(),
                                 structural=self.structural.cuda())


class TestDataSetTorch(NamedTuple):
    treatment: torch.Tensor
    covariate: torch.Tensor
    structural: torch.Tensor

    @classmethod
    def from_numpy(cls, test_data: TestDataSet):
        covariate = None
        if test_data.covariate is not None:
            covariate = torch.tensor(test_data.covariate, dtype=torch.float32)
        return TestDataSetTorch(treatment=torch.tensor(test_data.treatment, dtype=torch.float32),
                                covariate=covariate,
                                structural=torch.tensor(test_data.structural, dtype=torch.float32))

    def to_gpu(self):
        covariate = None
        if self.covariate is not None:
            covariate = self.covariate.cuda()
        return TestDataSetTorch(treatment=self.treatment.cuda(),
                                covariate=covariate,
                                structural=self.structural.cuda())
|
[
""
] | |
25358c69ffe54ebd92674d7e9523c7316fbd6bfc
|
aa86784b429c01778342dc60e29d3b3a1356d481
|
/Exam2675.py
|
18930967c09b0007213753c803bc5e3288396149
|
[] |
no_license
|
kayjayk/algorithm-solution
|
7997be6143c84a72ab9ff3bb59d03e9037bc1b29
|
e529b7aa1cfa2c261a3adaedc2d8c3809003bf39
|
refs/heads/master
| 2022-12-10T17:53:17.033617
| 2020-09-05T17:19:18
| 2020-09-05T17:19:18
| 269,100,954
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
import sys
T = int(sys.stdin.readline())
for i in range(T):
    tmp = sys.stdin.readline().split(sep=' ')
    R = int(tmp[0])
    S = tmp[1].rstrip()
    P = ''
    for j in range(len(S)):
        P += S[j]*R
    print(P)
|
[
"noreply@github.com"
] |
kayjayk.noreply@github.com
|
d0c2b78d51b03c7a8c71a2765db09f2024018358
|
44dd70bab754a258bb2de9e4de05271ced7cc5f7
|
/project/urls.py
|
dbb3fe6a0e59a7923797e466b4f4e3861286fd8a
|
[] |
no_license
|
GruborIvan/MovieBackend
|
b841982746f81a126b7b7a97469ff93609af6053
|
dc9a8306b403bff15aed9e07306b0884ac6a08f1
|
refs/heads/master
| 2023-05-01T11:47:08.958461
| 2021-05-14T14:15:58
| 2021-05-14T14:15:58
| 357,474,886
| 0
| 0
| null | 2021-05-14T14:15:59
| 2021-04-13T08:13:06
|
Python
|
UTF-8
|
Python
| false
| false
| 460
|
py
|
from django.contrib import admin
from django.urls import path, include
from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView

urlpatterns = [
    path('', include('myapi.urls')),
    path('', include('user_watchlist.urls')),
    path('', include('reactions.urls')),
    path('admin/', admin.site.urls),

    # JWT Authentication..
    path('token', TokenObtainPairView.as_view()),
    path('token/refresh', TokenRefreshView.as_view()),
]
|
[
"ivan.grubor@gmail.com"
] |
ivan.grubor@gmail.com
|