blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a9ab497015525833279bb4f7cb7b294f7e35efe7
|
5fd401dbc7b9ac782d387067c43a559971de5028
|
/modules/file/upload.py
|
5ad9b4a030323d70e88e69b37d7ef047af896c60
|
[] |
no_license
|
SagaieNet/weevely3
|
0b41be1bbd08a8ebde1e236775462483ad175c6e
|
c169bbf24807a581b3f61a455b9a43a5d48c8f52
|
refs/heads/master
| 2021-01-22T15:00:19.838734
| 2014-09-30T20:00:07
| 2014-09-30T20:00:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,139
|
py
|
from core.vectors import PhpCmd, ModuleCmd
from core.module import Module
from core import messages
from core.loggers import log
import random
import hashlib
import base64
class Upload(Module):

    """Upload file to remote filesystem."""

    def init(self):
        """Register module metadata, arguments, and the PHP upload vectors."""
        self.register_info(
            {
                'author': [
                    'Emilio Pinna'
                ],
                'license': 'GPLv3'
            }
        )

        self.register_arguments(
            # Declare mandatory arguments
            mandatory=[
                'lpath',
                'rpath'
            ],
            # Declare additional options
            optional={
                'content': '',
                'vector': ''
            },
            bind_to_vectors='vector')

        self.register_vectors(
            [
                PhpCmd(
                    "(file_put_contents('${rpath}', base64_decode('${content}'))&&print(1)) || print(0);",
                    name='file_put_contents'
                ),
                PhpCmd(
                    """($h=fopen("${rpath}","a+")&&fwrite($h, base64_decode('${content}'))&&fclose($h)&&print(1)) || print(0);""",
                    name="fwrite"
                )
            ]
        )

    def run(self, args):
        """Upload `content` (or the local file at `lpath`) to `rpath`.

        Aborts with a warning if the local file can't be read or if the
        remote path already exists. Returns None in all cases; the chosen
        vector prints 1/0 remotely to signal success.
        """
        # Load local file only when explicit content was not passed.
        content_orig = args.get('content')
        if not content_orig:
            lpath = args.get('lpath')

            try:
                # Binary mode: avoids text decoding issues and keeps
                # base64.b64encode() working on Python 3 as well.
                # `with` guarantees the handle is closed (the original
                # leaked the file object).
                with open(lpath, 'rb') as lfile:
                    content_orig = lfile.read()
            except Exception as e:  # `as` syntax works on Py2.6+ and Py3
                log.warning(
                    messages.generic.error_loading_file_s_s % (lpath, str(e)))
                return

        content = base64.b64encode(content_orig)

        # Check remote file existence so we never clobber an existing file.
        rpath_exists = ModuleCmd('file_check', [args['rpath'], 'exists']).run()
        if rpath_exists:
            log.warning(messages.generic.error_file_s_already_exists % args['rpath'])
            return

        vector_name, result = self.vectors.find_first_result(
            format_args={'args': args, 'content': content},
            condition=lambda result: True if result == '1' else False
        )
|
[
"emilio.pinn@gmail.com"
] |
emilio.pinn@gmail.com
|
6bf462112c68e100b92acc5b9b8ed814e8f09d27
|
ef4a1748a5bfb5d02f29390d6a66f4a01643401c
|
/algorithm/new_teacher_algorithm/AD/도약.py
|
5c781e9d4bc4c9a28efdc8ca127c58b5528ef92d
|
[] |
no_license
|
websvey1/TIL
|
aa86c1b31d3efc177df45503d705b3e58b800f8e
|
189e797ba44e2fd22a033d1024633f9e0128d5cf
|
refs/heads/master
| 2023-01-12T10:23:45.677578
| 2019-12-09T07:26:59
| 2019-12-09T07:26:59
| 162,102,142
| 0
| 1
| null | 2022-12-11T16:31:08
| 2018-12-17T08:57:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,093
|
py
|
import sys
# Redirect stdin to the local sample-input file so input() reads from it.
sys.stdin = open("도약.txt")
###########################################################
########################## 두개 쓰기 ########################
###########################################################
# def lowerSearch(s,e,f):
# # f 이상 중에서 가장 작은 값의 위치를 리턴
# sol = -1
# while s<=e:
# m = (s+e)//2
# if data[m] >= f: # f 이상이면 왼쪽영역 재탐색(더 작은 값 찾기 위해)
# sol = m
# e = m-1
# else:
# s= m+1 #우측탐색)
# return sol
#
# def upperSearch(s,e,f):
# # f 이하중에서 가장 큰 값의 위치를 리턴
# sol = -1
# while s<=e:
# m = (s+e)//2
# if data[m] <= f: # 데이타 이하면 오른쪽 재탐색(더 큰걸 찾기위해)
# sol = m
# s = m+1
# else:
# e= m-1
# return sol
# N = int(input())
# data = sorted([(int(input())) for i in range(N)])
# cnt = 0
# for i in range(N-2):
# for j in range(i+1, N-1):
# S = data[j]+(data[j]-data[i])
# E = data[j] + (data[j] - data[i])*2
# lo = lowerSearch(j+1, N-1, S)
# if lo==-1 or data[lo]>E: continue
# up = upperSearch(j+1, N-1, E)
# cnt += (up-lo+1)
# print(cnt)
###########################################################
########################## 하나 쓰기########################
###########################################################
def upperSearch(s, e, f):
    """Binary search over the global `data` list.

    Return the largest index in [s, e] whose value is strictly less
    than f, or -1 if every value in the range is >= f.
    """
    found = -1
    lo, hi = s, e
    while lo <= hi:
        mid = (lo + hi) // 2
        if data[mid] >= f:
            # Too big: discard the right half.
            hi = mid - 1
        else:
            # Candidate found; try to push further right.
            found = mid
            lo = mid + 1
    return found
N = int(input())  # number of positions (presumably stone/jump points — TODO confirm)
data = sorted([(int(input())) for i in range(N)])  # positions, ascending
cnt = 0
for i in range(N-2):
    for j in range(i+1, N-1):
        # A jump from data[i] to data[j] has length d = data[j]-data[i];
        # the next landing point must fall in the window [S, E] = [x+d, x+2d].
        S = data[j]+(data[j]-data[i])
        E = data[j] + (data[j] - data[i])*2
        # Count elements in [S, E] as (count < E+1) - (count < S),
        # via two binary searches over data[j+1:].
        cnt += upperSearch(j, N- 1, E+1) - upperSearch(j, N-1, S)
print(cnt)
|
[
"websvey1@gmail.com"
] |
websvey1@gmail.com
|
b563da1a4aa94a36c4599e6482162f6ded7d93e9
|
5b2218208aef68cf06609bcc3bf42b499d99d5f6
|
/docs/source/conf.py
|
e94bc7c7e29be180973b828865b19e7712c33ad6
|
[
"MIT"
] |
permissive
|
c137digital/unv_app_template
|
c36cacfff3e0be0b00ecad6365b20b434836ffe7
|
a1d1f2463334afc668cbf4e8acbf1dcaacc93e80
|
refs/heads/master
| 2020-05-25T19:24:17.098451
| 2020-04-24T19:33:08
| 2020-04-24T19:33:08
| 187,950,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,816
|
py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#

# -- Project information -----------------------------------------------------

project = 'unv_app_template'
copyright = '2020, change'
author = 'change'

# The short X.Y version
version = '0.1'
# The full version, including alpha/beta/rc tags
release = '0.1'

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# FIX: `None` is deprecated (warns on Sphinx >= 5) and was always treated
# as English; state it explicitly.
language = 'en'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'unv_app_templatedoc'

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'unv_app_template.tex', 'unv\\_template Documentation',
     'change', 'manual'),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'unv_app_template', 'unv_app_template Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'unv_app_template', 'unv_app_template Documentation',
     author, 'unv_app_template', 'One line description of project.',
     'Miscellaneous'),
]

# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

# -- Extension configuration -------------------------------------------------

# -- Options for intersphinx extension ---------------------------------------

# Example configuration for intersphinx: refer to the Python standard library.
# FIX: the positional `{uri: None}` format is deprecated; intersphinx now
# expects named mappings of the form `name: (target_uri, inventory)`.
intersphinx_mapping = {'python': ('https://docs.python.org/', None)}

# -- Options for todo extension ----------------------------------------------

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
|
[
"morty.space@gmail.com"
] |
morty.space@gmail.com
|
4c1e7c5233cb21118ed12162d9ba099c0665a80a
|
e0980f704a573894350e285f66f4cf390837238e
|
/.history/flex/models_20201029143145.py
|
b68119b214fd032d442ee5cf1b7492154ab67eb2
|
[] |
no_license
|
rucpata/WagtailWebsite
|
28008474ec779d12ef43bceb61827168274a8b61
|
5aa44f51592f49c9a708fc5515ad877c6a29dfd9
|
refs/heads/main
| 2023-02-09T15:30:02.133415
| 2021-01-05T14:55:45
| 2021-01-05T14:55:45
| 303,961,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
from django.db import models
from wagtail.core.models import Page
from wagtail.core.fields import StreamField
from wagtail.admin.edit_handlers import StreamFieldPanel
from wagtail.snippets.blocks import SnippetChooserBlock
from wagtail.core import blocks as wagtail_
from streams import blocks
from home.models import new_table_options
class FlexPage(Page):
    """Generic page assembled from a configurable stream of content blocks."""

    # Each tuple is (block name shown in the admin, block implementation).
    body = StreamField([
        ('title', blocks.TitleBlock()),
        ('cards', blocks.CardsBlock()),
        ('image_and_text', blocks.ImageAndTextBlock()),
        ('cta', blocks.CallToActionBlock()),
        # Testimonial is a snippet chooser rendered with a custom template.
        ('testimonial', SnippetChooserBlock(
            target_model='testimonials.Testimonial',
            template = 'streams/testimonial_block.html'
        )),
        # Table options come from home.models (shared table config).
        ('pricing_table', blocks.PricingTableBlock(table_options=new_table_options)),
    ], null=True, blank=True)

    # Expose the stream field in the page editor.
    content_panels = Page.content_panels + [
        StreamFieldPanel('body'),
    ]

    class Meta:
        verbose_name = 'Flex (misc) page'
        verbose_name_plural = 'Flex (misc) pages'
|
[
"rucinska.patrycja@gmail.com"
] |
rucinska.patrycja@gmail.com
|
71bfd188e3307f50316b5807460e05e6b0dab81e
|
0be27c0a583d3a8edd5d136c091e74a3df51b526
|
/int_long.py
|
09d9178607925a32fd93bcf2ea90ca80acb00f96
|
[] |
no_license
|
ssangitha/guvicode
|
3d38942f5d5e27a7978e070e14be07a5269b01fe
|
ea960fb056cfe577eec81e83841929e41a31f72e
|
refs/heads/master
| 2020-04-15T05:01:00.226391
| 2019-09-06T10:08:23
| 2019-09-06T10:08:23
| 164,405,935
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
# Classify an integer read from stdin by the smallest C type that holds it.
n = int(input())
# FIX: signed 16-bit range is [-2**15, 2**15 - 1] and signed 32-bit is
# [-2**31, 2**31 - 1]; the original bounds (`-2**15+1 .. 2**15+1`) were
# off by one at the low end and by two at the high end.
if -2**15 <= n <= 2**15 - 1:
    print("INT")
elif -2**31 <= n <= 2**31 - 1:
    print("LONG")
else:
    print("LONG LONG")
#..int,long...longlong
|
[
"noreply@github.com"
] |
ssangitha.noreply@github.com
|
e32bd0130a28604d940e0a1e7d79496057d8a0cb
|
66a9c25cf0c53e2c3029b423018b856103d709d4
|
/tests/live_test.py
|
b71930af68b02cc6137cb3b01a6f80f39c0ef9f3
|
[
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
fritzy/SleekXMPP
|
1b02d3e2b22efeb6bf3f8f487e6c0343b9b85baf
|
cc1d470397de768ffcc41d2ed5ac3118d19f09f5
|
refs/heads/develop
| 2020-05-22T04:14:58.568822
| 2020-02-18T22:54:57
| 2020-02-18T22:54:57
| 463,405
| 658
| 254
|
NOASSERTION
| 2023-06-27T20:05:54
| 2010-01-08T05:54:45
|
Python
|
UTF-8
|
Python
| false
| false
| 3,422
|
py
|
import logging
from sleekxmpp.test import *
class TestLiveStream(SleekTest):

    """
    Test that we can test a live stanza stream.
    """

    def tearDown(self):
        # Always close the stream, even when a test aborted mid-negotiation.
        self.stream_close()

    def testClientConnection(self):
        """Test that we can interact with a live ClientXMPP instance."""
        # Open a real client connection against a server on localhost.
        self.stream_start(mode='client',
                          socket='live',
                          skip=False,
                          jid='user@localhost/test',
                          password='user')

        # Use sid=None to ignore any id sent by the server since
        # we can't know it in advance.
        self.recv_header(sfrom='localhost', sid=None)
        self.send_header(sto='localhost')

        # Phase 1: server offers STARTTLS + SASL; we request STARTTLS.
        self.recv_feature("""
          <stream:features>
            <starttls xmlns="urn:ietf:params:xml:ns:xmpp-tls" />
            <mechanisms xmlns="urn:ietf:params:xml:ns:xmpp-sasl">
              <mechanism>DIGEST-MD5</mechanism>
              <mechanism>PLAIN</mechanism>
            </mechanisms>
          </stream:features>
        """)
        self.send_feature("""
          <starttls xmlns="urn:ietf:params:xml:ns:xmpp-tls" />
        """)
        self.recv_feature("""
          <proceed xmlns="urn:ietf:params:xml:ns:xmpp-tls" />
        """)

        # Stream restarts after TLS is established.
        self.send_header(sto='localhost')
        self.recv_header(sfrom='localhost', sid=None)

        # Phase 2: SASL PLAIN authentication.
        self.recv_feature("""
          <stream:features>
            <mechanisms xmlns="urn:ietf:params:xml:ns:xmpp-sasl">
              <mechanism>DIGEST-MD5</mechanism>
              <mechanism>PLAIN</mechanism>
            </mechanisms>
          </stream:features>
        """)
        self.send_feature("""
          <auth xmlns="urn:ietf:params:xml:ns:xmpp-sasl"
                mechanism="PLAIN">AHVzZXIAdXNlcg==</auth>
        """)
        self.recv_feature("""
          <success xmlns="urn:ietf:params:xml:ns:xmpp-sasl" />
        """)

        # Stream restarts again after successful auth.
        self.send_header(sto='localhost')
        self.recv_header(sfrom='localhost', sid=None)

        # Phase 3: resource binding.
        self.recv_feature("""
          <stream:features>
            <bind xmlns="urn:ietf:params:xml:ns:xmpp-bind" />
            <session xmlns="urn:ietf:params:xml:ns:xmpp-session" />
          </stream:features>
        """)

        # Should really use send, but our Iq stanza objects
        # can't handle bind element payloads yet.
        self.send_feature("""
          <iq type="set" id="1">
            <bind xmlns="urn:ietf:params:xml:ns:xmpp-bind">
              <resource>test</resource>
            </bind>
          </iq>
        """)
        self.recv_feature("""
          <iq type="result" id="1">
            <bind xmlns="urn:ietf:params:xml:ns:xmpp-bind">
              <jid>user@localhost/test</jid>
            </bind>
          </iq>
        """)

        self.stream_close()
# Collect this module's tests into a suite so it can be run directly.
# (unittest is available via the `from sleekxmpp.test import *` above.)
suite = unittest.TestLoader().loadTestsFromTestCase(TestLiveStream)

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG,
                        format='%(levelname)-8s %(message)s')

    tests = unittest.TestSuite([suite])
    result = unittest.TextTestRunner(verbosity=2).run(tests)
    # Emit a one-line, machine-readable summary (ad-hoc XML) of the run.
    test_ns = 'http://andyet.net/protocol/tests'
    print("<tests xmlns='%s' %s %s %s %s />" % (
        test_ns,
        'ran="%s"' % result.testsRun,
        'errors="%s"' % len(result.errors),
        'fails="%s"' % len(result.failures),
        'success="%s"' % result.wasSuccessful()))
|
[
"lancestout@gmail.com"
] |
lancestout@gmail.com
|
f5058ccbcc8449198222100dc98b9d6777472a89
|
2f6d017dedc68588b2615d65c1e8ca8bcdd90446
|
/api/deploy/write_json.py
|
01f5c1cf3ca63b0de2e90ca19f9b694b331c12f5
|
[] |
no_license
|
hysunflower/benchmark
|
70fc952a4eb1545208543627539d72e991cef78a
|
c14f99c15b4be9e11f56ea378ca15d9c3da23bab
|
refs/heads/master
| 2022-06-30T07:04:14.986050
| 2022-06-15T02:43:04
| 2022-06-15T02:43:04
| 224,449,279
| 1
| 0
| null | 2019-11-27T14:29:29
| 2019-11-27T14:29:29
| null |
UTF-8
|
Python
| false
| false
| 3,347
|
py
|
#!/bin/python
# -*- coding: UTF-8 -*-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import json
import op_benchmark_unit
# Chinese display labels for each comparison outcome.
COMPARE_RESULT_SHOWS = {
    "Better": "优于",
    "Equal": "打平",
    "Less": "差于",
    "Unknown": "未知",
    "Unsupport": "不支持",
    "Others": "其他",
    "Total": "汇总"
}


def create_summary_json(compare_result, category):
    """Build the summary table (a list of row dicts) for one comparison.

    The first row is a title row ("title": 1) listing the display label
    of every comparison outcome; each following row ("title": 0)
    summarises one (direction, method) combination for the GPU, showing
    "count (percent%)" per outcome or "--" when the count is zero.
    """
    rows = list()
    compare_result_colors = {"Better": "green", "Less": "red"}
    keys = compare_result.compare_result_keys

    header = {"title": 1, "row_0": category}
    for idx, key in enumerate(keys, 1):
        header["row_%i" % idx] = COMPARE_RESULT_SHOWS[key]
    rows.append(header)

    for device in ["gpu", "cpu"]:
        for direction in ["forward", "backward"]:
            for method in ["total", "kernel"]:
                # CPU results are currently not reported.
                if device == "cpu":
                    continue
                row = {
                    "title": 0,
                    "row_0": "{} {} ({})".format(device.upper(),
                                                 direction.capitalize(),
                                                 method)
                }
                value = compare_result.get(device, direction, method)
                total = value["Total"]
                for idx, key in enumerate(keys, 1):
                    count = value[key]
                    if count > 0:
                        percentage = float(count) / float(total)
                        cell = "{} ({:.2f}%)".format(count, percentage * 100)
                    else:
                        cell = "--"
                    row["row_%i" % idx] = cell
                rows.append(row)
    return rows
def dump_json(benchmark_result_list, output_path=None):
    """
    Dump the case-level and op-level comparison summaries to a json file.

    Does nothing (beyond a notice) when output_path is None.
    """
    if output_path is None:
        print("Output path is not specified, will not dump json.")
        return

    compare_result_case_level = op_benchmark_unit.summary_compare_result(
        benchmark_result_list)
    compare_result_op_level = op_benchmark_unit.summary_compare_result_op_level(
        benchmark_result_list)

    with open(output_path, 'w') as f:
        summary_case_json = create_summary_json(compare_result_case_level,
                                                "case_level")
        # FIX: the op-level summary was previously labeled "case_level"
        # too (copy-paste defect), making the two tables indistinguishable.
        summary_op_json = create_summary_json(compare_result_op_level,
                                              "op_level")
        f.write(json.dumps(summary_case_json + summary_op_json))
|
[
"noreply@github.com"
] |
hysunflower.noreply@github.com
|
89189e31f7eff193f8991a28da369417a28ae86d
|
68cd659b44f57adf266dd37789bd1da31f61670d
|
/D2/D2_20190715파리퇴치.py
|
37273d877cfb93458b8b8fdef4531e610039777c
|
[] |
no_license
|
01090841589/solved_problem
|
c0c6f5a46e4d48860dccb3b0288aa5b56868fbca
|
bbea2f31e5fe36cad100bc514eacd83545fb25b1
|
refs/heads/master
| 2023-07-02T23:55:51.631478
| 2021-08-04T13:57:00
| 2021-08-04T13:57:00
| 197,157,830
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
def arrr(N) :
    """Read N rows of space-separated integers from stdin, append each
    row to the module-level `fly` list, and return `fly`.

    NOTE: mutates the global `fly`; the caller resets it before each call.
    """
    for i in range(N) :
        inp = input().split(' ')
        inp = [int(j) for j in inp]
        fly.append(inp)
    return fly
def max_cal(fly, N, M):
    """Return the maximum sum over all M x M windows of the N x N grid `fly`.

    Brute force: slides the window over every valid top-left corner.
    Returns 0 if no window sum exceeds 0 (matches the original behavior).
    """
    best = 0
    for top in range(N - M + 1):
        for left in range(N - M + 1):
            window = sum(
                sum(row[left:left + M]) for row in fly[top:top + M]
            )
            if window > best:
                best = window
    return best
T = int(input())  # number of test cases
for a in range(T):
    # N[0] = grid size, N[1] = window size (presumably N[0] >= N[1] — TODO confirm)
    N = input().split(' ')
    fly = []
    fly = arrr(int(N[0]))
    print('#{0} {1}'.format(a+1, max_cal(fly,int(N[0]),int(N[1]))))
|
[
"chanchanhwan@naver.com"
] |
chanchanhwan@naver.com
|
f21360c68557a49b1b4e4413627b85cd6737f75c
|
73c9211d5627594e0191510f0b4d70a907f5c4c5
|
/nn/keras_dataguru/lesson2/work2.py
|
4dcacb8f30dd2feaffbd330256b8915e94435bcf
|
[] |
no_license
|
tigerxjtu/py3
|
35378f270363532fb30962da8674dbcee99eb5ff
|
5d24cd074f51bd0f17f6cc4f5f1a6e7cf0d48779
|
refs/heads/master
| 2021-07-13T05:34:15.080119
| 2020-06-24T09:36:33
| 2020-06-24T09:36:33
| 159,121,100
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,149
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import keras
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
# In[2]:
# Load the MNIST digit dataset (60k train / 10k test images of 28x28 pixels).
(x_train,y_train),(x_test,y_test)=mnist.load_data()
print('x_shape:',x_train.shape) #(60000,28,28)
print('y_shape:',y_train.shape) #(60000,)
# Flatten each 28x28 image to a 784-vector and scale pixels to [0, 1].
x_train = x_train.reshape(x_train.shape[0],-1)/255.0
x_test = x_test.reshape(x_test.shape[0],-1)/255.0
# One-hot encode the 10 digit classes.
y_train = np_utils.to_categorical(y_train,num_classes=10)
y_test = np_utils.to_categorical(y_test,num_classes=10)
# In[8]:
# model=Sequential([Dense(units=10,input_dim=784,bias_initializer='one',activation='softmax')])
# Two-layer MLP: 784 -> 256 (ReLU) -> 10 (softmax).
model=Sequential()
model.add(Dense(units=256,input_dim=x_train.shape[1],activation='relu'))
model.add(Dense(units=10,activation='softmax'))
# Plain SGD with a fixed learning rate of 0.2.
sgd=SGD(lr=0.2)
model.compile(optimizer=sgd,loss='categorical_crossentropy',metrics=['accuracy'])
# In[9]:
model.fit(x_train,y_train,batch_size=32,epochs=10)
loss,accuracy=model.evaluate(x_test,y_test)
print('\ntest loss:',loss)
print('accuracy:',accuracy)
# In[ ]:
|
[
"liyin@16010.net"
] |
liyin@16010.net
|
9f719d70ff61b820cde1a602f393ba9c91b6514b
|
d83f50302702d6bf46c266b8117514c6d2e5d863
|
/number-of-boomerangs.py
|
7add2f05767711dbf020f6215cc8f92ec9b5a59c
|
[] |
no_license
|
sfdye/leetcode
|
19764a6bdb82de114a2c82986864b1b2210c6d90
|
afc686acdda4168f4384e13fb730e17f4bdcd553
|
refs/heads/master
| 2020-03-20T07:58:52.128062
| 2019-05-05T08:10:41
| 2019-05-05T08:10:41
| 137,295,892
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
class Solution(object):
    def numberOfBoomerangs(self, points):
        """
        :type points: List[List[int]]
        :rtype: int

        For every anchor point p, bucket all points by squared distance
        to p; k points at the same distance contribute k*(k-1) ordered
        boomerang pairs (i, j) with i != j.
        """
        # FIX: the file never imported `collections`, so this method
        # raised NameError outside environments that pre-import it.
        import collections

        ans = 0
        for p in points:
            d = collections.defaultdict(int)
            for q in points:
                d[(p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2] += 1
            for k in d.values():
                ans += k * (k - 1)
        return ans
|
[
"tsfdye@gmail.com"
] |
tsfdye@gmail.com
|
840b8e213aaeafea3b9c2b03e58bd84996694d5a
|
bafcde124dd3af37ef14e322e0e76e82d8684469
|
/restapi/services/models/PasswordResetModel.py
|
47e24ce9d570005a767967ad9265625a49a108e4
|
[] |
no_license
|
mentimun-mentah/balihot-property-backend
|
1c7ac91c04f791ca55f5f97e872034fbc30a8d32
|
b715cc3988ca70d16dbe2e89839653af310fa091
|
refs/heads/master
| 2022-12-27T15:01:23.196310
| 2020-09-06T17:00:53
| 2020-09-06T17:00:53
| 268,197,437
| 1
| 0
| null | 2020-10-20T16:55:31
| 2020-05-31T02:46:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,310
|
py
|
import uuid, os
from services.serve import db
from time import time
from flask import url_for
from sqlalchemy import func
from services.libs.MailSmtp import MailSmtp
class PasswordReset(db.Model):
    """Password-reset token row: one active token per email address."""

    __tablename__ = 'password_resets'

    # Token id is a uuid4 hex string, used directly in the reset URL.
    id = db.Column(db.String(100),primary_key=True)
    email = db.Column(db.String(100),unique=True,index=True,nullable=False)
    # Unix timestamp after which the reset email may be re-sent.
    resend_expired = db.Column(db.Integer,nullable=True)
    created_at = db.Column(db.DateTime,default=func.now())

    def __init__(self,email: str):
        """Create a token for *email* with a 5-minute resend window."""
        self.email = email
        self.resend_expired = int(time()) + 300 # add 5 minute expired
        self.id = uuid.uuid4().hex

    def send_email_reset_password(self) -> None:
        """Email the reset link (APP_URL + token route) to the user."""
        link = os.getenv("APP_URL") + url_for('user.reset_password',token=self.id)
        MailSmtp.send_email([self.email],'Reset Password','email/EmailResetPassword.html',link=link)

    @property
    def resend_is_expired(self) -> bool:
        # True once the resend window has elapsed.
        return int(time()) > self.resend_expired

    def change_resend_expired(self) -> None:
        # Restart the 5-minute resend window.
        # (Annotation fixed: the method returns None, not "PasswordReset".)
        self.resend_expired = int(time()) + 300 # add 5 minute expired

    def save_to_db(self) -> None:
        db.session.add(self)
        db.session.commit()

    def delete_from_db(self) -> None:
        db.session.delete(self)
        db.session.commit()
|
[
"nyomanpradipta120@gmail.com"
] |
nyomanpradipta120@gmail.com
|
014e8f1ddcd99487d99ffa878a6e6cfa7d50ed6c
|
d55bda4c4ba4e09951ffae40584f2187da3c6f67
|
/h/admin/views/groups.py
|
0caffe6dcf887350fc17bfffc50c9f1ecc8b64bc
|
[
"BSD-3-Clause",
"BSD-2-Clause-Views",
"BSD-2-Clause",
"MIT"
] |
permissive
|
ficolo/h
|
3d12f78fe95843b2a8f4fc37231363aa7c2868d9
|
31ac733d37e77c190f359c7ef5d59ebc9992e531
|
refs/heads/master
| 2021-01-15T21:08:17.554764
| 2016-06-09T15:42:01
| 2016-06-09T15:42:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,234
|
py
|
# -*- coding: utf-8 -*-
from pyramid.view import view_config
from h import models
from h import paginator
@view_config(route_name='admin_groups',
             request_method='GET',
             renderer='h:templates/admin/groups.html.jinja2',
             permission='admin_groups')
@paginator.paginate
def groups_index(context, request):
    """Admin page: paginated list of all groups, newest first."""
    return models.Group.query.order_by(models.Group.created.desc())
@view_config(route_name='admin_groups_csv',
             request_method='GET',
             renderer='csv',
             permission='admin_groups')
def groups_index_csv(request):
    """Admin endpoint: download every group as a CSV report."""
    groups = models.Group.query

    header = ['Group name', 'Group URL', 'Creator username',
              'Creator email', 'Number of members']

    rows = []
    for group in groups:
        group_url = request.route_url('group_read',
                                      pubid=group.pubid,
                                      slug=group.slug)
        rows.append([group.name,
                     group_url,
                     group.creator.username,
                     group.creator.email,
                     len(group.members)])

    # Serve the response as a file download rather than inline.
    filename = 'groups.csv'
    request.response.content_disposition = 'attachment;filename=' + filename

    return {'header': header, 'rows': rows}
def includeme(config):
    # Register this module's @view_config views with the Pyramid configurator.
    config.scan(__name__)
|
[
"nick@whiteink.com"
] |
nick@whiteink.com
|
b9aeff68654c2ed50000a30879c2e21c640d81e5
|
0206ac23a29673ee52c367b103dfe59e7733cdc1
|
/src/nemo/compare_2nemo_simulations.py
|
041bbfd0229b247c34b4796abf04bc639b9483ae
|
[] |
no_license
|
guziy/RPN
|
2304a93f9ced626ae5fc8abfcc079e33159ae56a
|
71b94f4c73d4100345d29a6fbfa9fa108d8027b5
|
refs/heads/master
| 2021-11-27T07:18:22.705921
| 2021-11-27T00:54:03
| 2021-11-27T00:54:03
| 2,078,454
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,484
|
py
|
from collections import namedtuple
from pathlib import Path
from matplotlib import cm
from matplotlib.gridspec import GridSpec
from nemo.nemo_yearly_files_manager import NemoYearlyFilesManager
__author__ = 'huziy'
# Compare 2 Nemo outputs
import matplotlib.pyplot as plt
import numpy as np
def main_compare_max_yearly_ice_conc():
    """
    Compare the maximum yearly ice concentration of two NEMO simulations:
    saves side-by-side maps (base, modif, difference) and a time-series
    plot as PDFs under nemo/<modif>vs<base>/.
    """
    start_year = 1979
    end_year = 1985

    SimConfig = namedtuple("SimConfig", "path label")
    base_config = SimConfig("/home/huziy/skynet3_rech1/offline_glk_output_daily_1979-2012", "ERAI-driven")
    modif_config = SimConfig("/home/huziy/skynet3_rech1/one_way_coupled_nemo_outputs_1979_1985", "CRCM5")

    nemo_manager_base = NemoYearlyFilesManager(folder=base_config.path, suffix="icemod.nc")
    nemo_manager_modif = NemoYearlyFilesManager(folder=modif_config.path, suffix="icemod.nc")

    icecov_base, icecov_ts_base = nemo_manager_base.get_max_yearly_ice_fraction(start_year=start_year,
                                                                                end_year=end_year)
    icecov_modif, icecov_ts_modif = nemo_manager_modif.get_max_yearly_ice_fraction(start_year=start_year,
                                                                                   end_year=end_year)

    lons, lats, bmp = nemo_manager_base.get_coords_and_basemap()
    xx, yy = bmp(lons.copy(), lats.copy())

    # Plot as usual: model, obs, model - obs
    img_folder = Path("nemo/{}vs{}".format(modif_config.label, base_config.label))
    if not img_folder.is_dir():
        img_folder.mkdir(parents=True)
    img_file = img_folder.joinpath("compare_yearmax_icecov_{}_vs_{}_{}-{}.pdf".format(
        modif_config.label, base_config.label, start_year, end_year))

    fig = plt.figure()
    gs = GridSpec(2, 3, width_ratios=[1, 1, 0.05])
    cmap = cm.get_cmap("jet", 10)
    diff_cmap = cm.get_cmap("RdBu_r", 10)

    # base simulation panel
    ax = fig.add_subplot(gs[0, 0])
    cs = bmp.contourf(xx, yy, icecov_base, cmap=cmap)
    bmp.drawcoastlines(ax=ax)
    ax.set_title(base_config.label)

    # modified simulation panel (same contour levels as the base panel)
    ax = fig.add_subplot(gs[0, 1])
    cs = bmp.contourf(xx, yy, icecov_modif, cmap=cmap, levels=cs.levels)
    plt.colorbar(cs, cax=fig.add_subplot(gs[0, -1]))
    bmp.drawcoastlines(ax=ax)
    ax.set_title(modif_config.label)

    # difference panel (modif - base)
    ax = fig.add_subplot(gs[1, :])
    cs = bmp.contourf(xx, yy, icecov_modif - icecov_base, cmap=diff_cmap, levels=np.arange(-1, 1.2, 0.2))
    bmp.colorbar(cs, ax=ax)
    bmp.drawcoastlines(ax=ax)
    # FIX: the title must be set BEFORE savefig, otherwise it never
    # appears in the written PDF (it was previously set after saving).
    ax.set_title("{}-{}".format(modif_config.label, base_config.label))

    fig.tight_layout()
    fig.savefig(str(img_file), bbox_inches="tight")
    plt.close(fig)

    # Plot time series of the yearly-max ice cover for both runs.
    img_file = img_folder.joinpath("ts_compare_yearmax_icecov_{}_vs_{}_{}-{}.pdf".format(
        modif_config.label, base_config.label, start_year, end_year))
    fig = plt.figure()
    plt.plot(range(start_year, end_year + 1), icecov_ts_base, "b", lw=2, label=base_config.label)
    plt.plot(range(start_year, end_year + 1), icecov_ts_modif, "r", lw=2, label=modif_config.label)
    plt.legend()
    plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)
    plt.grid()
    plt.xlabel("Year")
    fig.tight_layout()
    fig.savefig(str(img_file), bbox_inches="tight")
    # Release the second figure too (it was previously left open).
    plt.close(fig)
if __name__ == '__main__':
    import application_properties
    # Sets the working directory expected by the project's relative paths.
    application_properties.set_current_directory()
    main_compare_max_yearly_ice_conc()
|
[
"guziy.sasha@gmail.com"
] |
guziy.sasha@gmail.com
|
92e85c7b6e66817ecaf916d920cc1d86019397c2
|
fe9573bad2f6452ad3e2e64539361b8bc92c1030
|
/scapy_code/sniif_packet.py
|
97cbf240c0083c9937735a47714341cd1d7da111
|
[] |
no_license
|
OceanicSix/Python_program
|
e74c593e2e360ae22a52371af6514fcad0e8f41f
|
2716646ce02db00306b475bad97105b260b6cd75
|
refs/heads/master
| 2022-01-25T16:59:31.212507
| 2022-01-09T02:01:58
| 2022-01-09T02:01:58
| 149,686,276
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
from scapy.all import *
def print_pkt(pkt):
    """Sniff callback: print any packet that carries an ICMP layer."""
    print("---------------this is a new packet----------------------")
    new_pkt = pkt[IP]
    # FIX: indexing a missing layer (`new_pkt[ICMP]`) raises IndexError;
    # haslayer() is the safe presence test.
    if new_pkt.haslayer(ICMP):
        new_pkt.show()
# Blocks forever, capturing only ICMP traffic (BPF filter) and printing each packet.
sniff(filter= "icmp" , prn=print_pkt)
|
[
"byan0007@student.monash.edu"
] |
byan0007@student.monash.edu
|
83a912f2fd9bb92402ffe65df2ebaf7a667edd7e
|
e590449a05b20712d777fc5f0fa52097678c089b
|
/python-client/test/test_stash_appscode_com_v1alpha1_api.py
|
58eaf340d2c3e2c403e782c27e9854d90c2f4271
|
[
"Apache-2.0"
] |
permissive
|
Hardeep18/kube-openapi-generator
|
2563d72d9f95196f8ef795896c08e8e21cd1a08e
|
6607d1e208965e3a09a0ee6d1f2de7e462939150
|
refs/heads/master
| 2020-04-11T03:30:18.786896
| 2018-05-05T20:57:51
| 2018-05-05T20:57:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,173
|
py
|
# coding: utf-8
"""
stash-server
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.stash_appscode_com_v1alpha1_api import StashAppscodeComV1alpha1Api # noqa: E501
from swagger_client.rest import ApiException
class TestStashAppscodeComV1alpha1Api(unittest.TestCase):
    """StashAppscodeComV1alpha1Api unit test stubs.

    Swagger-codegen generated: every test method below is an empty placeholder
    mirroring one generated client method 1:1, to be filled in by hand.
    """

    def setUp(self):
        # Fresh, unconfigured API client per test (no credentials supplied here).
        self.api = swagger_client.api.stash_appscode_com_v1alpha1_api.StashAppscodeComV1alpha1Api()  # noqa: E501

    def tearDown(self):
        pass

    # --- create ---

    def test_create_stash_appscode_com_v1alpha1_namespaced_recovery(self):
        """Test case for create_stash_appscode_com_v1alpha1_namespaced_recovery
        """
        pass

    def test_create_stash_appscode_com_v1alpha1_namespaced_repository(self):
        """Test case for create_stash_appscode_com_v1alpha1_namespaced_repository
        """
        pass

    def test_create_stash_appscode_com_v1alpha1_namespaced_restic(self):
        """Test case for create_stash_appscode_com_v1alpha1_namespaced_restic
        """
        pass

    # --- delete (collection and single object) ---

    def test_delete_stash_appscode_com_v1alpha1_collection_namespaced_recovery(self):
        """Test case for delete_stash_appscode_com_v1alpha1_collection_namespaced_recovery
        """
        pass

    def test_delete_stash_appscode_com_v1alpha1_collection_namespaced_repository(self):
        """Test case for delete_stash_appscode_com_v1alpha1_collection_namespaced_repository
        """
        pass

    def test_delete_stash_appscode_com_v1alpha1_collection_namespaced_restic(self):
        """Test case for delete_stash_appscode_com_v1alpha1_collection_namespaced_restic
        """
        pass

    def test_delete_stash_appscode_com_v1alpha1_namespaced_recovery(self):
        """Test case for delete_stash_appscode_com_v1alpha1_namespaced_recovery
        """
        pass

    def test_delete_stash_appscode_com_v1alpha1_namespaced_repository(self):
        """Test case for delete_stash_appscode_com_v1alpha1_namespaced_repository
        """
        pass

    def test_delete_stash_appscode_com_v1alpha1_namespaced_restic(self):
        """Test case for delete_stash_appscode_com_v1alpha1_namespaced_restic
        """
        pass

    # --- discovery ---

    def test_get_stash_appscode_com_v1alpha1_api_resources(self):
        """Test case for get_stash_appscode_com_v1alpha1_api_resources
        """
        pass

    # --- list ---

    def test_list_stash_appscode_com_v1alpha1_namespaced_recovery(self):
        """Test case for list_stash_appscode_com_v1alpha1_namespaced_recovery
        """
        pass

    def test_list_stash_appscode_com_v1alpha1_namespaced_repository(self):
        """Test case for list_stash_appscode_com_v1alpha1_namespaced_repository
        """
        pass

    def test_list_stash_appscode_com_v1alpha1_namespaced_restic(self):
        """Test case for list_stash_appscode_com_v1alpha1_namespaced_restic
        """
        pass

    def test_list_stash_appscode_com_v1alpha1_recovery_for_all_namespaces(self):
        """Test case for list_stash_appscode_com_v1alpha1_recovery_for_all_namespaces
        """
        pass

    def test_list_stash_appscode_com_v1alpha1_repository_for_all_namespaces(self):
        """Test case for list_stash_appscode_com_v1alpha1_repository_for_all_namespaces
        """
        pass

    def test_list_stash_appscode_com_v1alpha1_restic_for_all_namespaces(self):
        """Test case for list_stash_appscode_com_v1alpha1_restic_for_all_namespaces
        """
        pass

    # --- patch ---

    def test_patch_stash_appscode_com_v1alpha1_namespaced_recovery(self):
        """Test case for patch_stash_appscode_com_v1alpha1_namespaced_recovery
        """
        pass

    def test_patch_stash_appscode_com_v1alpha1_namespaced_repository(self):
        """Test case for patch_stash_appscode_com_v1alpha1_namespaced_repository
        """
        pass

    def test_patch_stash_appscode_com_v1alpha1_namespaced_restic(self):
        """Test case for patch_stash_appscode_com_v1alpha1_namespaced_restic
        """
        pass

    # --- read ---

    def test_read_stash_appscode_com_v1alpha1_namespaced_recovery(self):
        """Test case for read_stash_appscode_com_v1alpha1_namespaced_recovery
        """
        pass

    def test_read_stash_appscode_com_v1alpha1_namespaced_repository(self):
        """Test case for read_stash_appscode_com_v1alpha1_namespaced_repository
        """
        pass

    def test_read_stash_appscode_com_v1alpha1_namespaced_restic(self):
        """Test case for read_stash_appscode_com_v1alpha1_namespaced_restic
        """
        pass

    # --- replace ---

    def test_replace_stash_appscode_com_v1alpha1_namespaced_recovery(self):
        """Test case for replace_stash_appscode_com_v1alpha1_namespaced_recovery
        """
        pass

    def test_replace_stash_appscode_com_v1alpha1_namespaced_repository(self):
        """Test case for replace_stash_appscode_com_v1alpha1_namespaced_repository
        """
        pass

    def test_replace_stash_appscode_com_v1alpha1_namespaced_restic(self):
        """Test case for replace_stash_appscode_com_v1alpha1_namespaced_restic
        """
        pass

    # --- watch ---

    def test_watch_stash_appscode_com_v1alpha1_namespaced_recovery(self):
        """Test case for watch_stash_appscode_com_v1alpha1_namespaced_recovery
        """
        pass

    def test_watch_stash_appscode_com_v1alpha1_namespaced_recovery_list(self):
        """Test case for watch_stash_appscode_com_v1alpha1_namespaced_recovery_list
        """
        pass

    def test_watch_stash_appscode_com_v1alpha1_namespaced_repository(self):
        """Test case for watch_stash_appscode_com_v1alpha1_namespaced_repository
        """
        pass

    def test_watch_stash_appscode_com_v1alpha1_namespaced_repository_list(self):
        """Test case for watch_stash_appscode_com_v1alpha1_namespaced_repository_list
        """
        pass

    def test_watch_stash_appscode_com_v1alpha1_namespaced_restic(self):
        """Test case for watch_stash_appscode_com_v1alpha1_namespaced_restic
        """
        pass

    def test_watch_stash_appscode_com_v1alpha1_namespaced_restic_list(self):
        """Test case for watch_stash_appscode_com_v1alpha1_namespaced_restic_list
        """
        pass

    def test_watch_stash_appscode_com_v1alpha1_recovery_list_for_all_namespaces(self):
        """Test case for watch_stash_appscode_com_v1alpha1_recovery_list_for_all_namespaces
        """
        pass

    def test_watch_stash_appscode_com_v1alpha1_repository_list_for_all_namespaces(self):
        """Test case for watch_stash_appscode_com_v1alpha1_repository_list_for_all_namespaces
        """
        pass

    def test_watch_stash_appscode_com_v1alpha1_restic_list_for_all_namespaces(self):
        """Test case for watch_stash_appscode_com_v1alpha1_restic_list_for_all_namespaces
        """
        pass
if __name__ == '__main__':
    # Allow running the stub module directly with the stdlib test runner.
    unittest.main()
|
[
"tamal@appscode.com"
] |
tamal@appscode.com
|
c693954cad97f78d72668a79087d4930ccea1091
|
a8b37bd399dd0bad27d3abd386ace85a6b70ef28
|
/airbyte-integrations/connectors/source-opsgenie/source_opsgenie/source.py
|
743694d15b54b6ca441b6e91b3a528af43f6b85c
|
[
"MIT",
"LicenseRef-scancode-free-unknown",
"Elastic-2.0"
] |
permissive
|
thomas-vl/airbyte
|
5da2ba9d189ba0b202feb952cadfb550c5050871
|
258a8eb683634a9f9b7821c9a92d1b70c5389a10
|
refs/heads/master
| 2023-09-01T17:49:23.761569
| 2023-08-25T13:13:11
| 2023-08-25T13:13:11
| 327,604,451
| 1
| 0
|
MIT
| 2021-01-07T12:24:20
| 2021-01-07T12:24:19
| null |
UTF-8
|
Python
| false
| false
| 1,870
|
py
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from typing import Any, List, Mapping, Tuple
import requests
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http.requests_native_auth import TokenAuthenticator
from .streams import AlertLogs, AlertRecipients, Alerts, Incidents, Integrations, Services, Teams, Users, UserTeams
# Source
class SourceOpsgenie(AbstractSource):
    """Airbyte source connector for the Opsgenie REST API."""

    @staticmethod
    def get_authenticator(config: Mapping[str, Any]):
        # Opsgenie expects the "GenieKey <token>" Authorization scheme.
        return TokenAuthenticator(config["api_token"], auth_method="GenieKey")

    def check_connection(self, logger, config) -> Tuple[bool, Any]:
        """Probe /v2/account to validate the configured endpoint and API token.

        Returns (True, None) on success and (False, <reason string>) on any
        failure.  Fix: the return annotation previously used the builtin
        ``any`` function instead of ``typing.Any``.
        """
        try:
            auth = self.get_authenticator(config)
            api_endpoint = f"https://{config['endpoint']}/v2/account"
            response = requests.get(
                api_endpoint,
                headers=auth.get_auth_header(),
            )
            return response.status_code == requests.codes.ok, None
        except Exception as error:
            return False, f"Unable to connect to Opsgenie API with the provided credentials - {repr(error)}"

    def streams(self, config: Mapping[str, Any]) -> List[Stream]:
        """Build all supported streams; alert/user substreams share a parent stream."""
        auth = self.get_authenticator(config)
        args = {"authenticator": auth, "endpoint": config["endpoint"]}
        # Incremental streams additionally need the configured start date ("" = full history).
        incremental_args = {**args, "start_date": config.get("start_date", "")}
        users = Users(**args)
        alerts = Alerts(**incremental_args)
        return [
            alerts,
            AlertRecipients(parent_stream=alerts, **args),
            AlertLogs(parent_stream=alerts, **args),
            Incidents(**incremental_args),
            Integrations(**args),
            Services(**args),
            Teams(**args),
            users,
            UserTeams(parent_stream=users, **args),
        ]
|
[
"noreply@github.com"
] |
thomas-vl.noreply@github.com
|
a0b0afefa29d9867d2ca4e7ea95add21f514f525
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/cognitiveservices/azure-mgmt-cognitiveservices/tests/disable_test_cli_mgmt_cognitiveservices.py
|
36137183535e3c620a46e8cea7f926e0adadefd9
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,767
|
py
|
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# TEST SCENARIO COVERAGE
# ----------------------
# Methods Total : 14
# Methods Covered : 14
# Examples Total : 15
# Examples Tested : 13
# Coverage % : 87
# ----------------------
import unittest
import azure.mgmt.cognitiveservices
from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
AZURE_LOCATION = 'eastus'
class MgmtCognitiveServicesTest(AzureMgmtTestCase):
    """Recorded management-plane test exercising account CRUD, keys, SKUs and operations."""

    def setUp(self):
        super(MgmtCognitiveServicesTest, self).setUp()
        self.mgmt_client = self.create_mgmt_client(
            azure.mgmt.cognitiveservices.CognitiveServicesManagementClient
        )

    @unittest.skip('hard to test')
    @ResourceGroupPreparer(location=AZURE_LOCATION)
    def test_cognitiveservices(self, resource_group):
        SUBSCRIPTION_ID = self.settings.SUBSCRIPTION_ID
        RESOURCE_GROUP = resource_group.name
        ACCOUNT_NAME = "myAccount"
        LOCATION = "myLocation"

        # /Accounts/put/Create Account Min[put]
        BODY = {
          "location": "West US",
          "kind": "CognitiveServices",
          "sku": {
            "name": "S0"
          },
          "identity": {
            "type": "SystemAssigned"
          }
        }
        result = self.mgmt_client.accounts.create(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, account=BODY)

        # /Accounts/put/Create Account[put]
        # NOTE(review): the storage resource id below reads ".../storageAccountsfelixwatest"
        # - it looks like a '/' is missing before the account name; confirm before enabling.
        BODY = {
          "location": "West US",
          "kind": "Emotion",
          "sku": {
            "name": "S0"
          },
          "properties": {
            "encryption": {
              "key_vault_properties": {
                "key_name": "KeyName",
                "key_version": "891CF236-D241-4738-9462-D506AF493DFA",
                "key_vault_uri": "https://pltfrmscrts-use-pc-dev.vault.azure.net/"
              },
              "key_source": "Microsoft.KeyVault"
            },
            "user_owned_storage": [
              {
                "resource_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Storage/storageAccountsfelixwatest"
              }
            ]
          },
          "identity": {
            "type": "SystemAssigned"
          }
        }
        # result = self.mgmt_client.accounts.create(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, account=BODY)

        # /Accounts/get/Get Usages[get]
        result = self.mgmt_client.accounts.get_usages(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME)

        # /Accounts/get/List SKUs[get]
        result = self.mgmt_client.accounts.list_skus(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME)

        # /Accounts/get/Get Account[get]
        result = self.mgmt_client.accounts.get_properties(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME)

        # /Accounts/get/List Accounts by Resource Group[get]
        result = self.mgmt_client.accounts.list_by_resource_group(resource_group_name=RESOURCE_GROUP)

        # /Accounts/get/List Accounts by Subscription[get]
        result = self.mgmt_client.accounts.list()

        # /ResourceSkus/get/Regenerate Keys[get]
        result = self.mgmt_client.resource_skus.list()

        # /Operations/get/Get Operations[get]
        result = self.mgmt_client.operations.list()

        # /Accounts/post/Regenerate Keys[post]
        result = self.mgmt_client.accounts.regenerate_key(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, key_name="Key2")

        # /Accounts/post/List Keys[post]
        result = self.mgmt_client.accounts.list_keys(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME)

        # /Accounts/patch/Update Account[patch]
        BODY = {
          "sku": {
            "name": "S2"
          }
        }
        # result = self.mgmt_client.accounts.update(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME, account=BODY)

        # //post/Check SKU Availability[post]
        SKUS = [
          "S0"
        ]
        result = self.mgmt_client.check_sku_availability(location="eastus", skus=SKUS, kind="Face", type="Microsoft.CognitiveServices/accounts")

        # /Accounts/delete/Delete Account[delete]
        result = self.mgmt_client.accounts.delete(resource_group_name=RESOURCE_GROUP, account_name=ACCOUNT_NAME)
#------------------------------------------------------------------------------
if __name__ == '__main__':
    # Run directly with the stdlib test runner (the single test is skipped by default).
    unittest.main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
87db78fc9bb040bc77eeeb14ffba6ee78b8c43fa
|
42394bd8cd674dcd0822ae288ddb4f4e749a6ed6
|
/fluent_blogs/sitemaps.py
|
97da332b7a014536107d1f7fe042d295b321ac83
|
[
"Apache-2.0"
] |
permissive
|
mmggbj/django-fluent-blogs
|
4bca6e7effeca8b4cee3fdf4f8bb4eb4d192dfbe
|
7fc3220d6609fe0615ad6ab44044c671d17d06a3
|
refs/heads/master
| 2021-05-08T13:02:51.896360
| 2018-01-31T21:54:27
| 2018-01-31T21:54:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,647
|
py
|
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.contrib.sitemaps import Sitemap
from fluent_blogs.models import get_entry_model, get_category_model
from fluent_blogs.urlresolvers import blog_reverse
from parler.models import TranslatableModel
User = get_user_model()
EntryModel = get_entry_model()
CategoryModel = get_category_model()
class EntrySitemap(Sitemap):
    """
    The sitemap definition for the pages created with django-fluent-blogs.
    """
    def items(self):
        # The order_by here is superseded by the branch-specific order_by calls
        # below; it only acts as a default.
        qs = EntryModel.objects.published().order_by('-publication_date')
        if issubclass(EntryModel, TranslatableModel):
            # Note that .active_translations() can't be combined with other filters for translations__.. fields.
            qs = qs.active_translations()
            # Secondary sort key keeps the per-language rows of one entry adjacent.
            return qs.order_by('-publication_date', 'translations__language_code')
        else:
            return qs.order_by('-publication_date')

    def lastmod(self, urlnode):
        """Return the last modification of the entry."""
        return urlnode.modification_date

    def location(self, urlnode):
        """Return url of an entry."""
        return urlnode.url
class CategoryArchiveSitemap(Sitemap):
    """Sitemap of blog category archives that contain at least one published entry."""

    def items(self):
        # Restrict to categories actually referenced by a published entry.
        used_ids = EntryModel.objects.published().values('categories').order_by().distinct()
        return CategoryModel.objects.filter(id__in=used_ids)

    def lastmod(self, category):
        """Return the modification date of the newest entry in *category*."""
        newest = (
            EntryModel.objects.published()
            .order_by('-modification_date')
            .filter(categories=category)
            .only('modification_date')
        )
        return newest[0].modification_date

    def location(self, category):
        """Return the URL of the category archive page."""
        return blog_reverse('entry_archive_category', kwargs={'slug': category.slug}, ignore_multiple=True)
class AuthorArchiveSitemap(Sitemap):
    """Sitemap of per-author archives for authors with published entries."""

    def items(self):
        # Restrict to users that authored at least one published entry.
        author_ids = EntryModel.objects.published().values('author').order_by().distinct()
        return User.objects.filter(id__in=author_ids)

    def lastmod(self, author):
        """Return the modification date of the author's newest entry."""
        newest = (
            EntryModel.objects.published()
            .order_by('-modification_date')
            .filter(author=author)
            .only('modification_date')
        )
        return newest[0].modification_date

    def location(self, author):
        """Return the URL of the author archive page."""
        return blog_reverse('entry_archive_author', kwargs={'slug': author.username}, ignore_multiple=True)
class TagArchiveSitemap(Sitemap):
    """Sitemap of tag archive pages; empty when django-taggit is not installed."""

    def items(self):
        # Tagging is optional. When it's not used, it's ignored.
        if 'taggit' not in settings.INSTALLED_APPS:
            return []

        from taggit.models import Tag
        only_instances = EntryModel.objects.published().only('pk')

        # Until https://github.com/alex/django-taggit/pull/86 is merged,
        # better use the field names directly instead of bulk_lookup_kwargs
        return Tag.objects.filter(
            taggit_taggeditem_items__object_id__in=only_instances,
            taggit_taggeditem_items__content_type=ContentType.objects.get_for_model(EntryModel)
        )

    def lastmod(self, tag):
        """Return the last modification of the entry."""
        lastitems = EntryModel.objects.published().order_by('-modification_date').filter(tags=tag).only('modification_date')
        return lastitems[0].modification_date

    def location(self, tag):
        """Return url of an entry."""
        return blog_reverse('entry_archive_tag', kwargs={'slug': tag.slug}, ignore_multiple=True)
|
[
"vdboor@edoburu.nl"
] |
vdboor@edoburu.nl
|
b149655165dbfc3253e689f968488cd68f3e18c6
|
3e660e22783e62f19e9b41d28e843158df5bd6ef
|
/script.me.syncsmashingfromgithub/smashingfavourites/scripts/oldscripts/smashingtvextended.py
|
23aa7191d67b111064220b6ce41ecbc4caa91859
|
[] |
no_license
|
monthou66/repository.smashingfavourites
|
a9603906236000d2424d2283b50130c7a6103966
|
f712e2e4715a286ff6bff304ca30bf3ddfaa112f
|
refs/heads/master
| 2020-04-09T12:14:34.470077
| 2018-12-04T10:56:45
| 2018-12-04T10:56:45
| 160,341,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,059
|
py
|
# -*- coding: utf-8 -*-
# opens tv channel or guide groups via smashingfavourites and / or keymap.
import os
import os.path
import xbmc
import sys
# make sure dvbviewer is running - enable and wait if necessary
def enable():
    """Enable the pvr.dvbviewer addon via Kodi JSON-RPC if it is not already active."""
    if not xbmc.getCondVisibility('System.HasAddon(pvr.dvbviewer)'):
        xbmc.executeJSONRPC('{"jsonrpc":"2.0","method":"Addons.SetAddonEnabled","id":7,"params":{"addonid":"pvr.dvbviewer","enabled":true}}')
        # Give Kodi a moment to finish enabling before the caller proceeds.
        xbmc.sleep(200)
# make sure dvbviewer is not running - disable if necessary
def disable():
    """Disable the pvr.dvbviewer addon via Kodi JSON-RPC (unconditionally)."""
    xbmc.executeJSONRPC('{"jsonrpc":"2.0","method":"Addons.SetAddonEnabled","id":8,"params":{"addonid":"pvr.dvbviewer","enabled":false}}')
# define terms... c = count
# f=0 for just pvr disabled f = 1 (value) if channels, f=2 (value) if guides, f=3 if radio, f=4 if recordings,
# f=5 if timers, f=6 if search, f=7 if recording / recorded files, f=8 for timeshift, f=9 for permanently enable,
# f=10 for remove enable check.
# g = group number (value)... g=3 for last channel group / guide group
# define f
# First CLI argument selects the action code f (see the f-code table above).
a = sys.argv[1]
f = int(a)
def terms():
    """Parse the second CLI argument into the group selection.

    Publishes ``g`` (requested group number) and the loop counter seed ``c``
    as module globals, because opengroups() reads them at module scope; the
    original version left them as function locals, which made opengroups()
    fail with a NameError.  Returns (c, g) as a convenience for tests.
    """
    global c, g
    b = sys.argv[2]
    c = 2
    g = int(b)
    return c, g
# f=3
def radio():
    """Open the Kodi radio window (f=3) and stop."""
    xbmc.executebuiltin('ActivateWindow(Radio)')
    exit()

# f=4
def recordings():
    """Open the TV recordings window (f=4) and stop."""
    xbmc.executebuiltin('ActivateWindow(tvrecordings)')
    exit()

# f=5
def timers():
    """Open the TV timers window (f=5) and stop."""
    xbmc.executebuiltin('ActivateWindow(tvtimers)')
    exit()

# f=6
def search():
    """Open the TV search window (f=6) and stop."""
    xbmc.executebuiltin('ActivateWindow(tvsearch)')
    exit()

# pvr can be disabled for recorded files - f=7
def recordedfiles():
    """Browse recorded TV files from the SMB share (f=7, PVR already disabled)."""
    # NOTE(review): unlike every other call here, this builtin string lacks the
    # 'ActivateWindow(' wrapper - verify it actually opens the Videos window.
    xbmc.executebuiltin('Videos,smb://SourceTVRecordings/,return')
    exit()

# pvr can be disabled for timeshift files - f=8
def timeshift():
    """Browse timeshift files from the SMB share (f=8, PVR already disabled)."""
    # NOTE(review): same missing 'ActivateWindow(' wrapper as recordedfiles().
    xbmc.executebuiltin('Videos,smb://SourceTVRecordings/,return')
    exit()
# print stars to show up in log and error notification
def printstar():
    """Print two banner lines so problems stand out in the Kodi log."""
    print "****************************************************************************"
    print "****************************************************************************"

def error():
    """Show a generic failure notification in Kodi and stop the script."""
    xbmc.executebuiltin('Notification(Check, smashingtv)')
    exit()
# open channel or guide windows - f = 1,2
def opengroups():
    """Open the TV channel or guide window and navigate to group number ``g``.

    Reads module globals ``f`` (1 = channels, 2 = guides) and ``g`` (target
    group; 0 leaves the group dialog open for manual choice).
    """
    if f == 1:
        xbmc.executebuiltin('ActivateWindow(TVChannels)')
    elif f == 2:
        xbmc.executebuiltin('ActivateWindow(TVGuide)')
    else:
        xbmc.executebuiltin('Notification(Check, smashingtv)'); exit()
    xbmc.executebuiltin('SendClick(28)')
    xbmc.executebuiltin( "XBMC.Action(FirstPage)" )
    # Fix: seed the loop counter locally.  The original read the global c but
    # also assigned to it, which made c function-local and raised
    # UnboundLocalError on the first `while (c <= g)` evaluation.
    c = 2
    # loop move down to correct group (if necessary)
    if g > 1:
        while (c <= g):
            c = c + 1
            xbmc.executebuiltin( "XBMC.Action(Down)" )
    # open group if not using 'choose' option.
    if g >= 1:
        xbmc.executebuiltin( "XBMC.Action(Select)" )
        xbmc.executebuiltin( "XBMC.Action(Right)" )
        xbmc.executebuiltin( "ClearProperty(SideBladeOpen)" )
# define file locations
def files():
    """Resolve the enable-marker file paths and publish them as module globals.

    permanentenable() and removepermanentcheck() read SOURCEFILE / TARGET at
    module scope; the original version kept them as function locals, so those
    functions failed with a NameError.
    """
    global SOURCEFILE, TARGET
    SOURCEFILE = os.path.join(xbmc.translatePath('special://userdata/favourites/smashingtv/enablefile'), "enablepvr.txt")
    TARGET = os.path.join(xbmc.translatePath('special://userdata/favourites/smashingtv'), "enablepvr.txt")
# permanentenable:
# Copy pvrenable.txt to favourites/smashingtv folder as marker and enable pvr.dvbviewer - f=9
# check if SOURCEFILE exists - if not give an error message
# check if TARGET exists - if so give a notification 'already enabled'
# copy SOURCEFILE to TARGET, enable and close
def permanentenable():
if not os.path.isfile(SOURCEFILE):
printstar()
print "smashingtv problem - check userdata/favourites/smashingtv/enablefile folder for missing pvrenable.txt"
printstar()
error()
if os.path.isfile(TARGET):
xbmc.executebuiltin('Notification(PVR is, already enabled)')
enable()
exit()
else:
shutil.copy(SOURCEFILE, TARGET)
xbmc.executebuiltin('Notification(PVR is, permanently enabled)')
enable()
exit()
#removepermanentcheck
# Remove pvrenable.txt from favourites/smashingtv folder f=10
def removepermanentcheck():
    """Delete the permanent-enable marker file (if present) and disable the PVR (f=10)."""
    # NOTE(review): TARGET must already exist as a module global (see files()).
    if not os.path.isfile(TARGET):
        xbmc.executebuiltin('Notification(No PVR, lock found)')
        disable()
        exit()
    else:
        os.remove(TARGET)
        xbmc.executebuiltin('Notification(PVR, unlocked)')
        disable()
        exit()
# Get on with it...
# disable or enable pvr.dvbviewer, exit if necessary, exit and print message if f is out of range
if f == 0:
    disable()
    exit()
elif f == 7 or f == 8:
    # Recorded/timeshift files are browsed straight off the SMB share,
    # so the PVR addon is switched off first.
    disable()
elif f > 10 or f < 0:
    printstar()
    print "smashingtv exited 'cos f is out of range"
    print "f is ",f
    printstar()
    error()
else:
    enable()

# Dispatch on the action code parsed from sys.argv[1].
if f == 1 or f == 2:
    terms()
    opengroups()
elif f == 3:
    radio()
elif f == 4:
    recordings()
elif f == 5:
    timers()
elif f == 6:
    search()
elif f == 7:
    recordedfiles()
elif f == 8:
    timeshift()
elif f == 9:
    permanentenable()
    # NOTE(review): permanentenable() always exit()s, so this call is unreachable.
    enable()
elif f == 10:
    removepermanentcheck()
    # NOTE(review): removepermanentcheck() always exit()s, so this call is unreachable.
    disable()
else:
    printstar()
    print "smashingtv exited 'cos sumfink went rong"
    printstar()
    error()
|
[
"davemullane@gmail.com"
] |
davemullane@gmail.com
|
6042ebe496e64d755d312557f38f2f61d3e98e80
|
18e687608ff326fae4d1e1604cf452f086f99559
|
/classroom/admin.py
|
7da03566a6f4b009b1d4c24281b22e580476a82c
|
[
"Apache-2.0"
] |
permissive
|
nu11byt3s/lms-ml
|
955bad451ddcb78e464227294496ee0a92be08c0
|
c0ea63f09d4125b0ae9033fd8b0a4aab2604bb42
|
refs/heads/main
| 2023-08-13T08:09:53.097312
| 2021-10-12T09:51:31
| 2021-10-12T09:51:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
from django.contrib import admin
from .models import ClassFiles, ClassRoom, MemberShip, RoomStream, Comment
# Register every classroom model with the default admin site (same order as before).
for _model in (ClassRoom, ClassFiles, MemberShip, RoomStream, Comment):
    admin.site.register(_model)
|
[
"farsin310yeariha9701@gmail.com"
] |
farsin310yeariha9701@gmail.com
|
f50a2c13091de7e6652bd032364405a6cb81e40a
|
f08336ac8b6f8040f6b2d85d0619d1a9923c9bdf
|
/148-sortList.py
|
deaa09cb9bde869ffaac11cb72d4a48498d2f6ed
|
[] |
no_license
|
MarshalLeeeeee/myLeetCodes
|
fafadcc35eef44f431a008c1be42b1188e7dd852
|
80e78b153ad2bdfb52070ba75b166a4237847d75
|
refs/heads/master
| 2020-04-08T16:07:47.943755
| 2019-02-21T01:43:16
| 2019-02-21T01:43:16
| 159,505,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,383
|
py
|
'''
148. Sort List
Sort a linked list in O(n log n) time using constant space complexity.
Example 1:
Input: 4->2->1->3
Output: 1->2->3->4
Example 2:
Input: -1->5->3->4->0
Output: -1->0->3->4->5
'''
# Definition for singly-linked list.
class ListNode:
    """Singly linked list node."""

    def __init__(self, x):
        self.val = x
        self.next = None


class Solution:
    """LeetCode 148: sort a linked list in O(n log n) time via recursive merge sort."""

    def solve(self, head, length):
        """Merge-sort exactly *length* nodes starting at *head*; return the new head."""
        if length == 1:
            return head
        # Walk to the node just before the midpoint, then sever the list in two.
        node = head
        for _ in range(length // 2 - 1):
            node = node.next
        right_head = node.next
        node.next = None
        left = self.solve(head, length // 2)
        right = self.solve(right_head, length - length // 2)
        # Merge the two sorted halves behind a dummy node.
        dummy = ListNode(0)
        tail = dummy
        while left and right:
            if left.val < right.val:
                tail.next, left = left, left.next
            else:
                tail.next, right = right, right.next
            tail = tail.next
        tail.next = left if left else right
        return dummy.next

    def sortList(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        """
        count, node = 0, head
        while node:
            count += 1
            node = node.next
        if count < 2:
            return head
        return self.solve(head, count)
|
[
"marshallee413lmc@sina.com"
] |
marshallee413lmc@sina.com
|
24aeb181fe8842ceab8dcbdae1e7eae470e32e96
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_135/2091.py
|
2be5159255a480621d5ba476c77387286fc6d261
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
def get_values(f, line):
    """Consume the next four lines of *f*; return the whitespace-split tokens
    of line number *line* (1-based within those four)."""
    selected = []
    for row in range(1, 5):
        text = f.readline()
        if row == line:
            selected.extend(text.split())
    return selected
if __name__ == "__main__":
    # Code Jam "Magic Trick": the card is the unique value present in both
    # announced rows; 0 or >1 common values mean cheating / a bad magician.
    with open('problem.txt','r') as f:
        trials = int(f.readline())
        for i in range(0,trials):
            first = int(f.readline())
            first_choices = get_values(f,first)
            second = int(f.readline())
            second_choices = get_values(f,second)
            # Intersect the two announced rows (order preserved from the first row).
            combined = []
            for a in first_choices:
                if a in second_choices:
                    combined.append(a)
            if len(combined) == 1:
                print "Case #%s: %s"%(i+1,combined[0])
            elif len(combined) > 1:
                print "Case #%s: %s"%(i+1,"Bad magician!")
            else:
                print "Case #%s: %s"%(i+1,"Volunteer cheated!")
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
94039fd9178ad4160ba0efb6f0dda17c0fde9816
|
8926a97e04be31c62a28ee9031520ca785f2947b
|
/flask/member_test/app3.py
|
1879796d52c5957ea5c6d9b35167283e4c2e6a95
|
[] |
no_license
|
ragu6963/kfq_pyhton
|
3d651357242892713f36ac12e31f7b586d6e7422
|
bdad24e7620e8102902e2f0a71c8486fb0ad86c9
|
refs/heads/master
| 2022-11-26T16:02:49.838263
| 2020-07-31T00:40:59
| 2020-07-31T00:40:59
| 276,516,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,306
|
py
|
from flask import Flask, request, render_template, redirect, url_for, jsonify, json
import pymysql, os, cx_Oracle
from flask_sqlalchemy import SQLAlchemy
from json import JSONEncoder
app = Flask(__name__)
# Oracle XE connection for the hr demo schema (MySQL variant kept below for reference).
app.config["SQLALCHEMY_DATABASE_URI"] = "oracle://hr:hr@127.0.0.1:1521/xe"
# app.config['SQLALCHEMY_DATABASE_URI'] = "mysql+pymysql://root:qwer1234@localhost/test"
# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class User(db.Model):
    """SQLAlchemy model for a site member, keyed by userid."""

    # id = db.Column(db.Integer, primary_key = True)
    userid = db.Column(db.String(20), primary_key=True)
    userpw = db.Column(db.String(20))
    username = db.Column(db.String(20))
    userage = db.Column(db.Integer)
    usermail = db.Column(db.String(20))
    useradd = db.Column(db.String(20))
    usergender = db.Column(db.String(20))
    usertel = db.Column(db.String(20))

    def __repr__(self):
        # Fix: the original interpolated self.id, but this model defines no
        # "id" column (userid is the primary key), so repr() raised
        # AttributeError.
        return "<userid %r,username %r>" % (self.userid, self.username)

    def __init__(self, userid, userpw, username, userage, usermail, useradd, usergender, usertel):
        self.userid = userid
        self.userpw = userpw
        self.username = username
        self.userage = userage
        self.usermail = usermail
        self.useradd = useradd
        self.usergender = usergender
        self.usertel = usertel

    def toJSON(self):
        """Serialize every instance attribute to an indented JSON string."""
        return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
@app.route("/")
def index():
    """Render the landing page."""
    return render_template("index.html")
@app.route("/usersform", methods=["POST", "GET"])
def usersform():
    """GET: show the signup form; POST: create a User from the posted fields."""
    if request.method == "GET":
        return render_template("usersform.html")
    else:
        userid = request.form.get("userid")
        userpw = request.form.get("userpw")
        username = request.form.get("username")
        userage = request.form.get("userage")
        # NOTE(review): this reads form key "useremail" while the update view
        # reads "usermail" - confirm the template field names actually differ.
        usermail = request.form.get("useremail")
        useradd = request.form.get("useradd")
        usergender = request.form.get("usergender")
        usertel = request.form.get("usertel")
        my_user = User(userid, userpw, username, userage, usermail, useradd, usergender, usertel)
        db.session.add(my_user)
        db.session.commit()
        return redirect("/list")
@app.route("/list")
def list():
    """Show every user.

    NOTE: the view name shadows the builtin list(); kept unchanged because the
    Flask endpoint name (used by url_for) derives from it.
    """
    all_data = User.query.all()
    return render_template("list.html", list=all_data)
@app.route("/content/<userid>")
def content(userid):
    """Render the detail page for one user."""
    member = User.query.filter_by(userid=userid).one()
    return render_template("content.html", list=member)
@app.route("/updateform/<userid>", methods=["GET"])
def updateformget(userid):
    """Show the edit form pre-filled with one user's current values."""
    member = User.query.filter_by(userid=userid).one()
    return render_template("updateform.html", list=member)
@app.route("/updateform", methods=["POST"])
def updateformpost():
    """Apply the posted form values to the matching User row."""
    member = User.query.get(request.form.get("userid"))
    # Copy every editable column straight from the form (same keys as before).
    for column in ("userid", "userpw", "username", "userage",
                   "usermail", "useradd", "usergender", "usertel"):
        setattr(member, column, request.form.get(column))
    db.session.commit()
    return redirect("/list")
@app.route("/deleteform/<userid>")
def deleteformget(userid):
    """Delete the user with the given id, then return to the list page."""
    victim = User.query.get(userid)
    db.session.delete(victim)
    db.session.commit()
    return redirect("/list")
@app.route("/ajaxlist", methods=["GET"])
def ajaxlistget():
    """Render the AJAX search page seeded with the full user list."""
    everyone = User.query.all()
    return render_template("ajaxlist.html", list=everyone)
@app.route("/ajaxlist", methods=["POST"])
def ajaxlistpost():
    """Substring-search users by id; return their JSON serializations."""
    userid = request.form.get("userid")
    query = User.query.filter(User.userid.like("%" + userid + "%")).order_by(User.userid)
    all_data = query.all()
    result = []
    for data in all_data:
        result.append(data.toJSON())
    # return jsonify(all_data)
    # NOTE(review): this returns a plain Python list of JSON strings; Flask
    # versions before 2.2 cannot serialize a list response - jsonify(result)
    # may be what was intended.
    return result
@app.route("/imglist")
def imglist():
    """List every file under static/img for the image gallery page."""
    print(os.path.dirname(__file__))
    dirname = os.path.dirname(__file__) + "/static/img/"
    filenames = os.listdir(dirname)
    print(filenames)
    return render_template("imglist.html", filenames=filenames)
if __name__ == "__main__":
    # Create any missing tables, then serve in debug mode on port 8089.
    db.create_all()
    app.run(debug=True, port=8089)
|
[
"bhj1684@naver.com"
] |
bhj1684@naver.com
|
b40d364523a6a0d9a5830630e0d64197363cab87
|
95884a6b32f6831e68c95d7785bc968a56877121
|
/cifar_imagenet/models/cifar/momentumnet_restart_lookahead_vel_learned_scalar_clip_mom.py
|
ba44f69eff7577785b303b3c9d7d192514916fc3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
minhtannguyen/RAdam
|
d89c4c6ce1ce0dd95b0be3aa2c20e70ea62da8b0
|
44f403288df375bae0785cc82dd8c888eaaaa441
|
refs/heads/master
| 2020-08-09T07:53:50.601789
| 2020-02-17T06:17:05
| 2020-02-17T06:17:05
| 214,041,479
| 0
| 0
|
Apache-2.0
| 2019-10-09T23:11:14
| 2019-10-09T23:11:14
| null |
UTF-8
|
Python
| false
| false
| 7,243
|
py
|
# -*- coding: utf-8 -*-
"""
momentum net
"""
import torch
import torch.nn as nn
import math
from torch.nn.parameter import Parameter
__all__ = ['momentumnet_restart_lookahead_vel_learned_scalar_clip_mom']
def conv3x3(in_planes, out_planes, stride=1):
    """Bias-free 3x3 convolution; padding=1 preserves spatial size at stride 1."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
    """Pre-activation basic residual block with learned scalar step/momentum.

    forward() consumes and produces a PAIR of tensors [x, y] rather than a
    single feature map, carrying the previous iterate alongside the current one.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, step_size=2.0, momentum=0.5):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        self.downsample = downsample
        self.stride = stride
        # for momentum net: both coefficients are trainable scalars
        self.step_size = Parameter(torch.tensor(step_size), requires_grad=True)
        self.momentum = Parameter(torch.tensor(momentum), requires_grad=True)

    def forward(self, invec):
        """invec = [x, y]; returns the updated pair [outx, outy]."""
        x, y = invec[0], invec[1]
        residualx = x
        residualy = y
        # Pre-activation residual branch: BN -> ReLU -> conv, twice.
        out = self.bn1(x)
        out = self.relu(out)
        out = self.conv1(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv2(out)
        if self.downsample is not None:
            # Match channels/resolution of both carried tensors.
            residualx = self.downsample(x)
            residualy = self.downsample(y)
        # Step from x, then extrapolate between the new and previous iterate
        # using the learned momentum coefficient.
        outy = residualx - self.step_size*out
        outx = (1.0 + self.momentum) * outy - self.momentum * residualy
        return [outx, outy]
class Bottleneck(nn.Module):
    """Pre-activation bottleneck block carrying an explicit velocity tensor.

    forward() consumes [x, prex], where prex is the previous update
    (velocity); the learned momentum scalar is clamped to [0, 1] at use time.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, step_size=2.0, momentum=0.5):
        super(Bottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # for momentum net: both coefficients are trainable scalars
        self.step_size = Parameter(torch.tensor(step_size), requires_grad=True)
        self.momentum = Parameter(torch.tensor(momentum), requires_grad=True)

    def forward(self, invec):
        """invec = [x, prex]; returns [outx, outprex]."""
        x, prex = invec[0], invec[1]
        residualx = x
        residualprex = prex
        # Look-ahead point: evaluate the branch at x + clamp(momentum) * velocity.
        x = x + torch.clamp(input=self.momentum, min=0.0, max=1.0) * prex
        out = self.bn1(x)
        out = self.relu(out)
        out = self.conv1(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn3(out)
        out = self.relu(out)
        out = self.conv3(out)
        if self.downsample is not None:
            residualx = self.downsample(residualx)
            # Velocity is restarted (zeroed) whenever shape changes, since the
            # old velocity no longer matches the new feature dimensions.
            residualprex = torch.zeros_like(out)
        # New velocity = clamped-momentum * old velocity - step_size * gradient-like term.
        outprex = torch.clamp(input=self.momentum, min=0.0, max=1.0) * residualprex - self.step_size * out
        outx = residualx + outprex
        return [outx, outprex]
class MomentumNet(nn.Module):
    """CIFAR-style momentum residual network (3 stages of 16/32/64 channels,
    8x8 average pooling before the classifier).

    feature_vec selects which half of the [x, velocity] pair feeds the head:
    'x' uses the state, anything else uses the velocity term.
    """
    def __init__(self, depth, step_size=2.0, momentum=0.5, num_classes=1000, block_name='BasicBlock', feature_vec='x'):
        super(MomentumNet, self).__init__()
        # Model type specifies number of layers for CIFAR-10 model
        if block_name.lower() == 'basicblock':
            assert (depth - 2) % 6 == 0, 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
            n = (depth - 2) // 6
            block = BasicBlock
        elif block_name.lower() == 'bottleneck':
            assert (depth - 2) % 9 == 0, 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            raise ValueError('block_name shoule be Basicblock or Bottleneck')
        self.inplanes = 16
        # for momentum net: shared step size / momentum passed to every block
        self.step_size = step_size
        self.momentum = momentum
        self.feature_vec = feature_vec
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,
                               bias=False)
        self.layer1 = self._make_layer(block, 16, n, step_size=self.step_size, momentum=self.momentum)
        self.layer2 = self._make_layer(block, 32, n, stride=2, step_size=self.step_size, momentum=self.momentum)
        self.layer3 = self._make_layer(block, 64, n, stride=2, step_size=self.step_size, momentum=self.momentum)
        self.bn = nn.BatchNorm2d(64 * block.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Linear(64 * block.expansion, num_classes)
        # He-style initialization for convs, unit/zero init for batch norms.
        # (note: `n` is reused here as the fan-out, shadowing the block count)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1, step_size=2.0, momentum=0.5):
        # Projection shortcut (1x1 conv) when shape or channel count changes.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, step_size=step_size, momentum=momentum))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, step_size=step_size, momentum=momentum))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        # Velocity channel starts at zero ("restart").
        out = [x, torch.zeros_like(x)]
        out = self.layer1(out)  # 32x32
        out = self.layer2(out)  # 16x16
        out = self.layer3(out)  # 8x8
        # Select state ('x') or velocity as the feature fed to the head.
        if self.feature_vec=='x':
            x = out[0]
        else:
            x = out[1]
        x = self.bn(x)
        x = self.relu(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def momentumnet_restart_lookahead_vel_learned_scalar_clip_mom(**kwargs):
    """
    Construct a MomentumNet model (the name exported via __all__).

    All keyword arguments (depth, step_size, momentum, num_classes,
    block_name, feature_vec) are forwarded to MomentumNet.__init__.
    """
    return MomentumNet(**kwargs)
# def momentum_net20(**kwargs):
# return MomentumNet(num_classes=10, depth=20, block_name="basicblock")
# def momentum_net56(**kwargs):
# return MomentumNet(num_classes=10, depth=56, block_name="bottleneck")
# def momentum_net110(**kwargs):
# return MomentumNet(num_classes=10, depth=110, block_name="bottleneck")
# def momentum_net164(**kwargs):
# return MomentumNet(num_classes=10, depth=164, block_name="bottleneck")
# def momentum_net290(**kwargs):
# return MomentumNet(num_classes=10, depth=290, block_name="bottleneck")
|
[
"mn15@rice.edu"
] |
mn15@rice.edu
|
066f467af220cf2210d237b5be14036c9a366795
|
c4ee4a9d28425aa334038ad174c7b1d757ff45db
|
/py/survey_backdeck/SurveyBackdeckDB.py
|
425ea9aadef024c30c34e6d6199380a96c70f5d1
|
[
"MIT"
] |
permissive
|
nwfsc-fram/pyFieldSoftware
|
32b3b9deb06dba4a168083a77336613704c7c262
|
477ba162b66ede2263693cda8c5a51d27eaa3b89
|
refs/heads/master
| 2023-08-03T07:38:24.117376
| 2021-10-20T22:49:51
| 2021-10-20T22:49:51
| 221,750,910
| 1
| 1
|
MIT
| 2023-07-20T13:13:25
| 2019-11-14T17:23:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,155
|
py
|
__author__ = 'Todd.Hay'
# -------------------------------------------------------------------------------
# Name: TrawlBackdeckDB.py
# Purpose: Provides connection to the trawl_backdeck.db SQLite database
# Author: Todd.Hay
# Email: Todd.Hay@noaa.gov
#
# Created: Jan 08, 2016
# License: MIT
#-------------------------------------------------------------------------------
import unittest
from py.common import CommonDB
class HookAndLineHookCutterDB(CommonDB.CommonDB):
    """
    Subclass the CommonDB class, which makes the actual database connection
    """
    def __init__(self, db_filename="hookandline_cutter.db"):
        # All connection handling is inherited; only the DB filename differs.
        super().__init__(db_filename)
class TestTrawlBackdeckDB(unittest.TestCase):
    """
    Test basic SQLite connectivity
    """
    def setUp(self):
        # Open a fresh connection to the cutter database for each test.
        self._db = HookAndLineHookCutterDB('hookandline_cutter.db')
    def tearDown(self):
        self._db.disconnect()
    def test_query(self):
        # A valid database is expected to hold more than 200 SETTINGS rows.
        count = 0
        for t in self._db.execute('SELECT * FROM SETTINGS'):
            count += 1
        self.assertGreater(count, 200)
if __name__ == '__main__':
    unittest.main()
|
[
"will.smith@noaa.gov"
] |
will.smith@noaa.gov
|
56cb0a401e086cffa893b8b4dcd75edf07ca9e4c
|
dacf092e82b5cc841554178e5117c38fd0b28827
|
/day24_program4/server_start.py
|
80694736d2929c195b9ccc3caf71539ee8767758
|
[] |
no_license
|
RainMoun/python_programming_camp
|
f9bbee707e7468a7b5d6633c2364f5dd75abc8a4
|
f8e06cdd2e6174bd6986d1097cb580a6a3b7201f
|
refs/heads/master
| 2020-04-15T11:27:09.680587
| 2019-04-06T02:21:14
| 2019-04-06T02:21:14
| 164,630,838
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,604
|
py
|
import socket
import os
from conf import setting
from interface import common_interface
from db import models, db_handler
import logging.config
def upload_file():  # 接收文件
    """Receive ``file_size`` bytes from the connected client and store them
    under db/file_upload/<file_name>; verify the MD5 and record the upload
    if it matches.

    Relies on module-level globals bound in the accept loop below:
    conn, admin_name, file_name, file_size, file_md5, BASE_DIR.
    """
    file_path = os.path.join(BASE_DIR, 'db', 'file_upload')
    if not os.path.exists(file_path):
        os.makedirs(file_path)
    path = os.path.join(BASE_DIR, 'db', 'file_upload', file_name)
    has_received = 0
    # 'wb' creates/truncates the file, so the original open('w')/close()
    # followed by open('ab') dance is unnecessary; the context manager
    # guarantees the handle is closed even if recv() raises.
    with open(path, 'wb') as f:
        while has_received != file_size:
            data_once = conn.recv(1024)
            if not data_once:
                # Peer closed the connection before sending everything --
                # bail out instead of looping forever on empty reads.
                break
            f.write(data_once)
            has_received += len(data_once)
    file_md5_finish = common_interface.get_file_md5(path)
    if file_md5_finish == file_md5:
        # Checksum matches: persist the upload record and log it.
        file_upload = models.File(file_name, file_size, file_md5, admin_name)
        db_handler.save_upload_file_message(file_upload)
        logging.info('{} upload {}, the md5 is {}'.format(admin_name, file_name, file_md5))
        print('{} upload {}, the md5 is {}'.format(admin_name, file_name, file_md5))
# Dispatch table: maps the flag field of the request header to a handler.
func_dict = {
    'post': upload_file
}
if __name__ == '__main__':
    sk = socket.socket()
    sk.bind(setting.SERVER_ADDRESS)
    sk.listen(3)
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    while True:
        conn, addr = sk.accept()
        while True:
            data = conn.recv(1024)
            print(data.decode('utf-8'))
            # Header format: flag|admin_name|file_name|file_size|file_md5
            flag, admin_name, file_name, file_size, file_md5 = data.decode('utf-8').split('|')
            file_size = int(file_size)
            func_dict[flag]()
            break
    # NOTE(review): unreachable -- the outer `while True` never exits.
    sk.close()
|
[
"775653143@qq.com"
] |
775653143@qq.com
|
50a805dc8a946792bd4c670cf4d42f3317b82c4d
|
5925c0fc934760f1726818a18d6098499dcfb981
|
/GAN_upscale_28x28/GAN_upscale_28x28.py
|
0c4f60692043ba8d1aaf07a121723c78c1911750
|
[
"MIT"
] |
permissive
|
PsycheShaman/Keras-GAN
|
64b4b20471f4b185580860f28caa9320ed615a51
|
9a1f2576af8f67fad7845421ea5feb53012c1c9f
|
refs/heads/master
| 2020-06-17T11:03:23.993616
| 2019-09-14T08:04:25
| 2019-09-14T08:04:25
| 195,858,154
| 2
| 0
|
MIT
| 2019-09-14T08:04:26
| 2019-07-08T17:33:39
|
Python
|
UTF-8
|
Python
| false
| false
| 6,859
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 11 12:14:02 2019
@author: gerhard
"""
from __future__ import print_function, division
from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import sys
import numpy as np
import glob
import pickle
def load_data(pattern="C:\\Users\\gerhard\\Documents\\msc-thesis-data\\cnn\\x_*.pkl"):
    """Load every pickled array matching *pattern* and stack them on axis 0.

    Parameters
    ----------
    pattern : str
        Glob pattern locating the pickle files.  The default keeps the
        original hard-coded location, so existing calls are unchanged.

    Returns
    -------
    numpy.ndarray
        All loaded arrays concatenated along the first axis.
    """
    x_files = glob.glob(pattern)
    chunks = []
    for fname in x_files:
        print(fname)
        with open(fname, 'rb') as x_file:
            chunks.append(pickle.load(x_file))
    # One concatenate at the end is O(total size); the original grew the
    # array with np.concatenate inside the loop, which is quadratic.
    x = np.concatenate(chunks, axis=0)
    print(x.shape)
    return x
def scale(x, out_range=(-1, 1)):
    """Linearly rescale *x* from its own [min, max] span into *out_range*."""
    lo, hi = np.min(x), np.max(x)
    out_lo, out_hi = out_range
    # Center on the midpoint, normalize by the span, then map to out_range.
    centered = (x - (hi + lo) / 2) / (hi - lo)
    return centered * (out_hi - out_lo) + (out_hi + out_lo) / 2
class GAN():
    """Fully-connected GAN over 28x28 single-channel images.

    Data loaded by load_data() is zero-padded into a 28x28 frame (see
    train()); the generator maps a 5-dim latent vector to an image and the
    discriminator is a plain MLP classifier.
    """
    def __init__(self):
        # Image geometry and latent size.
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 5
        # Discriminator learns 4x faster than the generator (TTUR-style split).
        optimizer_discr = Adam(0.0004, 0.5)
        optimizer_gen = Adam(0.0001, 0.5)
        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='binary_crossentropy',
            optimizer=optimizer_discr,
            metrics=['accuracy'])
        # Build the generator
        self.generator = self.build_generator()
        # The generator takes noise as input and generates imgs
        z = Input(shape=(self.latent_dim,))
        img = self.generator(z)
        # For the combined model we will only train the generator
        self.discriminator.trainable = False
        # The discriminator takes generated images as input and determines validity
        validity = self.discriminator(img)
        # The combined model (stacked generator and discriminator)
        # Trains the generator to fool the discriminator
        self.combined = Model(z, validity)
        self.combined.compile(loss='binary_crossentropy', optimizer=optimizer_gen)
    def build_generator(self):
        # MLP generator: latent -> 256 -> 512 -> 1024 -> flattened image,
        # tanh output matches the [-1, 1] scaling applied in train().
        model = Sequential()
        model.add(Dense(256, input_dim=self.latent_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(1024))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(np.prod(self.img_shape), activation='tanh'))
        model.add(Reshape(self.img_shape))
        model.summary()
        noise = Input(shape=(self.latent_dim,))
        img = model(noise)
        return Model(noise, img)
    def build_discriminator(self):
        # MLP discriminator: image -> 512 -> 256 -> sigmoid validity score.
        model = Sequential()
        model.add(Flatten(input_shape=self.img_shape))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(256))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(1, activation='sigmoid'))
        model.summary()
        img = Input(shape=self.img_shape)
        validity = model(img)
        return Model(img, validity)
    def train(self, epochs, batch_size=128, sample_interval=50):
        # Load the dataset
        X_train = load_data()
        # Zero-pad each sample into a 28x28 frame (offsets 5 and 2).
        # NOTE(review): assumes the raw images fit within 23x26 -- confirm
        # against the pickled data shape.
        new_x = np.zeros((X_train.shape[0],28,28))
        for i in range(0,X_train.shape[0]):
            x_new_i = np.zeros((28,28))
            x_old_i = X_train[i,:,:]
            x_new_i[5:x_old_i.shape[0]+5,2:x_old_i.shape[1]+2] = x_old_i
            new_x[i,:,:] = x_new_i
        X_train = new_x
        del new_x
        # Rescale -1 to 1
        # X_train = X_train / 127.5 - 1.
        X_train = scale(X_train)
        X_train = np.expand_dims(X_train, axis=3)
        # Adversarial ground truths
        # valid = np.ones((batch_size, 1))
        # fake = np.zeros((batch_size, 1))
        # Label smoothing: targets of 0.975/0.025 instead of hard 1/0.
        valid = np.full(shape=(batch_size,1),fill_value=0.975)
        fake = np.full(shape=(batch_size,1),fill_value=0.025)
        for epoch in range(epochs):
            # ---------------------
            #  Train Discriminator
            # ---------------------
            # Select a random batch of images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]
            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
            # Generate a batch of new images
            gen_imgs = self.generator.predict(noise)
            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch(imgs, valid)
            d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
            # ---------------------
            #  Train Generator
            # ---------------------
            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
            # Train the generator (to have the discriminator label samples as valid)
            g_loss = self.combined.train_on_batch(noise, valid)
            # Plot the progress
            print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                self.sample_images(epoch)
    def sample_images(self, epoch):
        # Render one generated sample and save it to images/<epoch>.png.
        # r, c = 5, 5
        noise = np.random.normal(0, 1, (2, self.latent_dim))
        gen_imgs = self.generator.predict(noise)
        # Rescale images 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5
        plt.imshow(gen_imgs[1,:,:,0],cmap='gray')
        # fig, axs = plt.subplots(r, c)
        # cnt = 0
        # for i in range(r):
        #     for j in range(c):
        #         axs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray')
        #         axs[i,j].axis('off')
        #         cnt += 1
        plt.savefig("images/%d.png" % epoch)
        plt.close()
if __name__ == '__main__':
    # Train the GAN, sampling an image every 10 epochs into images/.
    gan = GAN()
    gan.train(epochs=30000, batch_size=32, sample_interval=10)
|
[
"christiaan.viljoen@cern.ch"
] |
christiaan.viljoen@cern.ch
|
babc9118bdcf7d2c25b4dc9e551d807e63b0a18f
|
6b6c00e637e4659e4c960ff5dc48c8b25f09c543
|
/myihome/web_html.py
|
86d5f26bfaab5886725ef16013e4324d6595659e
|
[] |
no_license
|
too-hoo/myiHome
|
881d24f7835a1d6f95fea8cc97d68194d078d451
|
54c57755f5c9a700330cb38a47fe88e4f9f7ab3a
|
refs/heads/master
| 2020-07-13T11:26:53.915246
| 2019-09-04T11:43:01
| 2019-09-04T11:43:01
| 205,073,018
| 9
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,638
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# 静态文件加载的显示文件
from flask import Blueprint, current_app, make_response
from flask_wtf import csrf # 引入CSRF防御
# 提供静态文件的蓝图
# Blueprint that serves static html pages.
html = Blueprint("web_html", __name__)
# 127.0.0.1:5000/()
# 127.0.0.1:5000/(index.html)
# 127.0.0.1:5000/(register.html)
# 127.0.0.1:5000/(favico.ico)  # requested by the browser itself: the site icon
# The regex may match nothing at all (.* allows the empty string);
# html_file_name receives the extracted file name.
@html.route("/<re(r'.*'):html_file_name>")
def get_html(html_file_name):
    """Serve an html file from the static folder, attaching a csrf_token cookie."""
    # NOTE(review): the "re" URL converter is not a Flask built-in -- it is
    # presumably a custom converter registered in the app factory; confirm.
    # We could read files from the static folder ourselves, but Flask's
    # current_app.send_static_file does exactly that for us.
    # An empty name means the request path was "/", i.e. the home page.
    if not html_file_name:
        html_file_name = 'index.html'
    # Everything except favicon.ico lives under static/html/
    if html_file_name != 'favicon.ico':
        html_file_name = 'html/' + html_file_name  # prepend html/
    # Generate a CSRF token value for the client.
    csrf_token = csrf.generate_csrf()
    # send_static_file looks the file up in the static directory; wrap the
    # result in make_response so a cookie can be set before returning.
    resp = make_response(current_app.send_static_file(html_file_name))
    # Hand the CSRF token to the client as a cookie.
    resp.set_cookie('csrf_token', csrf_token)
    return resp
|
[
"13414851554@163.com"
] |
13414851554@163.com
|
328588836d307293f0d666768429a1b18e4e1007
|
3a1c5ae00324fc26728ad9549cd4cf710a6dfcca
|
/trackmeapp/serializers.py
|
ecb9872ce6f7e0f07739fb553cc72ca93c734d99
|
[] |
no_license
|
EnockOMONDI/TRACK-ME
|
c39b3f6ed04957a011c96735526475dae596941c
|
aff6704f2ce1645a6b7f044a52abf0769de7d604
|
refs/heads/master
| 2021-06-24T22:26:49.841861
| 2019-06-24T07:19:22
| 2019-06-24T07:19:22
| 193,167,979
| 0
| 0
| null | 2021-06-10T21:37:58
| 2019-06-21T22:46:08
|
Python
|
UTF-8
|
Python
| false
| false
| 309
|
py
|
from rest_framework import serializers
from trackmeapp.models import Task
# transforming objects to JSON and vice versa
class TaskSerializer(serializers.ModelSerializer):
    """Serializes Task instances to/from JSON for the REST API."""
    class Meta:
        model = Task
        fields = ('item_id', 'title', 'description', 'created_at', 'comp_date', 'status')
|
[
"enockomondike@gmail.com"
] |
enockomondike@gmail.com
|
4835bc6e70d40dff4467464c94615a922a6eeb0d
|
82ef9a0dd1618a28770597227acfc0150b948af2
|
/wearnow/tex/plug/_plugin.py
|
dd765acd80043d80efc8cafe2aca871e1af2e00f
|
[] |
no_license
|
bmcage/wearnow
|
ef32a7848472e79e56763b38551835aa97864b21
|
c8dfa75e1ea32b0c021d71c4f366ab47104c207e
|
refs/heads/master
| 2021-01-16T00:27:59.597812
| 2016-01-19T11:55:03
| 2016-01-19T11:55:03
| 37,195,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,318
|
py
|
#
# WearNow - a GTK+/GNOME based program
#
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
This module provides the base class for plugins.
"""
class Plugin(object):
    """
    Base class for all plugins that can be registered with the plugin
    manager.
    """
    def __init__(self, name, description, module_name):
        """
        :param name: A friendly name to call this plugin.
            Example: "GEDCOM Import"
        :type name: string
        :param description: A short description of the plugin.
            Example: "This plugin will import a GEDCOM file into a database"
        :type description: string
        :param module_name: The name of the module that contains this plugin.
            Example: "gedcom"
        :type module_name: string
        :return: nothing
        """
        self._name = name
        self._description = description
        self._module_name = module_name
    def get_name(self):
        """
        Get the name of this plugin.
        :return: a string representing the name of the plugin
        """
        return self._name
    def get_description(self):
        """
        Get the description of this plugin.
        :return: a string that describes the plugin
        """
        return self._description
    def get_module_name(self):
        """
        Get the name of the module that this plugin lives in.
        :return: a string representing the name of the module for this plugin
        """
        return self._module_name
|
[
"benny.malengier@gmail.com"
] |
benny.malengier@gmail.com
|
35d145eae9baf5f65b192fdad8fad0e7408f07eb
|
9eff544e604f9cff4384d4154204ab3276a56a23
|
/mpsci/distributions/gumbel_max.py
|
c6080c33985af06b004adc47893c0f19297ed96c
|
[
"BSD-2-Clause"
] |
permissive
|
WarrenWeckesser/mpsci
|
844d22b0230bc8fb5bf2122e7705c6fd1f0744d9
|
cd4626ee34680627abd820d9a80860b45de1e637
|
refs/heads/main
| 2023-08-31T11:01:11.412556
| 2023-08-25T20:30:43
| 2023-08-25T20:30:43
| 159,705,639
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,575
|
py
|
"""
Gumbel probability distribution (for maxima)
--------------------------------------------
This is the same distribution as:
* `scipy.stats.gumbel_r`;
* NumPy's `numpy.random.Generator.gumbel`;
* the Gumbel distribution discussed in the wikipedia article
"Gumbel distribtion" (https://en.wikipedia.org/wiki/Gumbel_distribution);
* the Type I extreme value distribution used in the text "An Introduction
to Statistical Modeling of Extreme Values" by Stuart Coles (Springer, 2001);
* the Gumbel distribution given in the text "Modelling Extremal Events" by
Embrechts, Klüppelberg and Mikosch (Springer, 1997);
* the Gumbel distribution in the text "Statistical Distribution" (fourth ed.)
by Forbes, Evans, Hastings and Peacock (Wiley, 2011);
* the `extreme_value_distribution` class implemented in the Boost/math C++
library;
* the `Gumbel` distribution in the Rust `rand_distr` crate.
"""
from mpmath import mp
from .. import stats
from mpsci.stats import mean as _mean
from ._common import _seq_to_mp
__all__ = ['pdf', 'logpdf', 'cdf', 'invcdf', 'sf', 'invsf', 'mean', 'var',
'nll', 'mle', 'mom']
def pdf(x, loc, scale):
    """
    Probability density function for the Gumbel distribution (for maxima).
    """
    if scale <= 0:
        raise ValueError('scale must be positive.')
    # The PDF is exp(logpdf); delegating keeps the two implementations
    # consistent with each other.
    with mp.extradps(5):
        x, loc, scale = mp.mpf(x), mp.mpf(loc), mp.mpf(scale)
        return mp.exp(logpdf(x, loc, scale))
def logpdf(x, loc, scale):
    """
    Log of the PDF of the Gumbel distribution.
    """
    if scale <= 0:
        raise ValueError('scale must be positive.')
    with mp.extradps(5):
        x, loc, scale = mp.mpf(x), mp.mpf(loc), mp.mpf(scale)
        # With z = (x - loc)/scale:  log pdf = -z - exp(-z) - log(scale)
        z = (x - loc) / scale
        return -(z + mp.exp(-z)) - mp.log(scale)
def cdf(x, loc, scale):
    """
    Cumulative distribution function for the Gumbel distribution.
    """
    if scale <= 0:
        raise ValueError('scale must be positive.')
    with mp.extradps(5):
        x, loc, scale = mp.mpf(x), mp.mpf(loc), mp.mpf(scale)
        # CDF(x) = exp(-exp(-(x - loc)/scale))
        z = (x - loc) / scale
        return mp.exp(-mp.exp(-z))
def invcdf(p, loc, scale):
    """
    Inverse of the CDF for the Gumbel distribution.
    """
    if scale <= 0:
        raise ValueError('scale must be positive.')
    with mp.extradps(5):
        p, loc, scale = mp.mpf(p), mp.mpf(loc), mp.mpf(scale)
        # Solve exp(-exp(-z)) = p for z, then undo the standardization.
        q = -mp.log(-mp.log(p))
        return scale*q + loc
def sf(x, loc, scale):
    """
    Survival function for the Gumbel distribution.
    """
    if scale <= 0:
        raise ValueError('scale must be positive.')
    with mp.extradps(5):
        x, loc, scale = mp.mpf(x), mp.mpf(loc), mp.mpf(scale)
        # SF(x) = 1 - exp(-exp(-z)); expm1 keeps precision for small values.
        z = (x - loc) / scale
        return -mp.expm1(-mp.exp(-z))
def invsf(p, loc, scale):
    """
    Inverse of the survival function for the Gumbel distribution.
    """
    if scale <= 0:
        raise ValueError('scale must be positive.')
    with mp.extradps(5):
        p, loc, scale = mp.mpf(p), mp.mpf(loc), mp.mpf(scale)
        # Invert 1 - exp(-exp(-z)) = p; log1p keeps precision for small p.
        q = -mp.log(-mp.log1p(-p))
        return scale*q + loc
def mean(loc, scale):
    """
    Mean of the Gumbel distribution.
    """
    if scale <= 0:
        raise ValueError('scale must be positive.')
    # E[X] = loc + gamma*scale, where gamma is the Euler-Mascheroni constant.
    with mp.extradps(5):
        loc, scale = mp.mpf(loc), mp.mpf(scale)
        return loc + mp.euler*scale
def var(loc, scale):
    """
    Variance of the Gumbel distribution.
    """
    if scale <= 0:
        raise ValueError('scale must be positive.')
    # Var[X] = (pi**2 / 6) * scale**2; loc does not affect the variance.
    with mp.extradps(5):
        loc, scale = mp.mpf(loc), mp.mpf(scale)
        return mp.pi**2/6 * scale**2
def nll(x, loc, scale):
    """
    Negative log-likelihood function for the Gumbel distribution.
    """
    if scale <= 0:
        raise ValueError('scale must be positive.')
    with mp.extradps(5):
        loc = mp.mpf(loc)
        scale = mp.mpf(scale)
        nobs = len(x)
        zvals = [(mp.mpf(xi) - loc)/scale for xi in x]
        # NLL = n*log(scale) + sum(z) + sum(exp(-z))
        log_scale_term = nobs*mp.log(scale)
        linear_term = mp.fsum(zvals)
        exp_term = mp.fsum([mp.exp(-zv) for zv in zvals])
        return log_scale_term + linear_term + exp_term
def _mle_scale_func(scale, x, xbar):
    # Stationarity condition for the MLE of scale; the root of this
    # function in `scale` is the maximum likelihood estimate.
    weights = [mp.exp(-xi/scale) for xi in x]
    weighted_sum = mp.fsum([xi * wi for xi, wi in zip(x, weights)])
    weight_total = mp.fsum(weights)
    return weight_total*(xbar - scale) - weighted_sum
def _solve_mle_scale(x):
    # Find the scale MLE as the root of _mle_scale_func by first bracketing
    # a sign change and then running a root finder on the bracket.
    xbar = stats.mean(x)
    # Very rough guess of the scale parameter:
    s0 = stats.std(x)
    if s0 == 0:
        # The x values are all the same.
        return s0
    # Find an interval in which there is a sign change of
    # _mle_scale_func.
    s1 = s0
    s2 = s0
    sign2 = mp.sign(_mle_scale_func(s2, x, xbar))
    # Expand the bracket geometrically in both directions (shrink s1 by
    # 10% and grow s2 by 10% per pass) until the signs at the endpoints
    # differ.
    while True:
        s1 = 0.9*s1
        sign1 = mp.sign(_mle_scale_func(s1, x, xbar))
        if (sign1 * sign2) <= 0:
            break
        s2 = 1.1*s2
        sign2 = mp.sign(_mle_scale_func(s2, x, xbar))
        if (sign1 * sign2) <= 0:
            break
    # Did we stumble across the root while looking for an interval
    # with a sign change? Not likely, but check anyway...
    if sign1 == 0:
        return s1
    if sign2 == 0:
        return s2
    root = mp.findroot(lambda t: _mle_scale_func(t, x, xbar),
                       [s1, s2], solver='anderson')
    return root
def _mle_scale_with_fixed_loc(scale, x, loc):
    # Stationarity condition for scale when loc is held fixed; the MLE of
    # scale is the root of this function.
    zvals = [(xi - loc) / scale for xi in x]
    terms = [mp.expm1(-zv)*zv for zv in zvals]
    return stats.mean(terms) + 1
def mle(x, loc=None, scale=None):
    """
    Maximum likelihood estimates for the Gumbel distribution.
    `x` must be a sequence of numbers--it is the data to which the
    Gumbel distribution is to be fit.
    If either `loc` or `scale` is not None, the parameter is fixed
    at the given value, and only the other parameter will be fit.
    Returns maximum likelihood estimates of the `loc` and `scale`
    parameters.
    Examples
    --------
    Imports and mpmath configuration:
    >>> from mpmath import mp
    >>> mp.dps = 20
    >>> from mpsci.distributions import gumbel_max
    The data to be fit:
    >>> x = [6.86, 14.8 , 15.65, 8.72, 8.11, 8.15, 13.01, 13.36]
    Unconstrained MLE:
    >>> gumbel_max.mle(x)
    (mpf('9.4879877926148360358863'), mpf('2.727868138859403832702'))
    If we know the scale is 2, we can add the argument `scale=2`:
    >>> gumbel_max.mle(x, scale=2)
    (mpf('9.1305625326153555632872'), mpf('2.0'))
    """
    with mp.extradps(5):
        x = _seq_to_mp(x)
        if scale is None and loc is not None:
            # Estimate scale with fixed loc.
            loc = mp.mpf(loc)
            # Initial guess for findroot.
            s0 = stats.std([xi - loc for xi in x])
            scale = mp.findroot(
                lambda t: _mle_scale_with_fixed_loc(t, x, loc), s0
            )
            return loc, scale
        if scale is None:
            # Neither parameter fixed: solve for scale first.
            scale = _solve_mle_scale(x)
        else:
            scale = mp.mpf(scale)
        if loc is None:
            # Closed-form loc given scale: loc = -scale*log(mean(exp(-x/scale))).
            ex = [mp.exp(-xi / scale) for xi in x]
            loc = -scale * mp.log(stats.mean(ex))
        else:
            loc = mp.mpf(loc)
        return loc, scale
def mom(x):
    """
    Method of moments parameter estimation for the Gumbel-max distribution.
    x must be a sequence of real numbers.
    Returns (loc, scale).
    """
    # Match the first two raw sample moments to the theoretical mean and
    # variance: Var = (pi**2/6)*scale**2 and E[X] = loc + euler*scale.
    with mp.extradps(5):
        m1 = _mean(x)
        m2 = _mean([mp.mpf(t)**2 for t in x])
        scale = mp.sqrt(6*(m2 - m1**2))/mp.pi
        loc = m1 - scale*mp.euler
        return loc, scale
|
[
"warren.weckesser@gmail.com"
] |
warren.weckesser@gmail.com
|
af43c669459f8dd1c60ac2dae04418f9744f6a29
|
44a7b4879c1da661cc2e8aa51c7fcc24cfb0fd3b
|
/tests/psu/psu_version_test.py
|
b3942b4c3523ed8b6ec2b0b234eb1705e2bb042b
|
[
"MIT"
] |
permissive
|
seoss/scs_core
|
21cd235c9630c68f651b9a8c88120ab98fe5f513
|
a813f85f86b6973fa77722a7d61cc93762ceba09
|
refs/heads/master
| 2021-08-08T08:09:56.905078
| 2020-04-16T19:46:52
| 2020-04-16T19:46:52
| 156,239,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
#!/usr/bin/env python3
"""
Created on 13 Nov 2017
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
import json
from scs_core.data.json import JSONify
from scs_core.psu.psu_version import PSUVersion
# --------------------------------------------------------------------------------------------------------------------
# Round-trip a PSUVersion: JSON string -> dict -> object -> dict -> JSON string,
# printing each intermediate representation.
jstr = '{"id": "South Coast Science PSU", "tag": "1.2.3", "c-date": "Aug 8 2017", "c-time": "08:35:25"}'
print(jstr)
print("-")
jdict = json.loads(jstr)
print(jdict)
print("-")
# Build the domain object from the parsed dict.
status = PSUVersion.construct_from_jdict(jdict)
print(status)
print("-")
# Serialize back out and compare by eye with the original string.
jdict = status.as_json()
print(jdict)
print("-")
jstr = JSONify.dumps(jdict)
print(jstr)
print("-")
|
[
"bruno.beloff@southcoastscience.com"
] |
bruno.beloff@southcoastscience.com
|
3206f80ebb7cd794ba25b8f13f14e0c5a68d2477
|
6f8c5d58ccd771d1ba92dc053b54caa44be9515c
|
/inst_fluid_en/models.py
|
7fba96ece1c1d3dfd9ed2d96070362bda8abf302
|
[
"MIT"
] |
permissive
|
manumunoz/8004
|
b6f32cf446a3d0d66988d34a03a252ba4d885da1
|
06b5a5a8cdea53294ff85c4bedb6be163d2da25a
|
refs/heads/master
| 2020-03-24T22:24:37.155460
| 2019-03-28T07:02:18
| 2019-03-28T07:02:18
| 143,083,725
| 0
| 0
|
NOASSERTION
| 2018-12-07T15:17:27
| 2018-08-01T00:29:45
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,478
|
py
|
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range
)
import itertools
author = 'Manu Munoz'
doc = """
Identity Switch - Networks: Instructions FLUID
"""
class Constants(BaseConstants):
    """oTree constants for the FLUID instructions app (identity-switch
    networks experiment)."""
    #------------------------------------------
    name_in_url = 'inst_fluid_en'
    names = ['1','2','3','4','5','6','7']
    players_per_group = len(names)
    instructions_template = 'inst_fluid_en/Instructions.html'
    periods = 1
    num_rounds = periods
    #------------------------------------------
    # Treatment & Group parameters
    others = len(names) - 1
    total_circles = 4
    total_triangles = 3
    part_name = 1
    part_fixed = 2
    part_fluid = 3
    part_alloc = 4
    rounds_fixed = 10
    #------------------------------------------
    # Payoffs (points per link/coordination outcome and conversion rates)
    exp_currency = "points"
    currency = "pesos"
    currency_exchange = 1000
    points_exchange = 1
    min_pay = 10000
    link_cost = 2
    liked_gain = 6
    disliked_gain = 4
    switch_cost = 6
    #------------------------------------------
    # Group Names (English, with original Spanish noted)
    group_a = 'Lions' #Leones
    group_b = 'Tigers' #Tigres
    group_c = 'Leopards' #Leopardos
    group_d = 'Jaguars' #Jaguares
    group_e = 'Cats' #Gatos
    group_f = 'Coyotes' #Coyotes
    group_g = 'Jackals' #Chacales
    group_h = 'Wolves' #Lobos
    group_i = 'Foxes' #Zorros
    group_j = 'Dogs' #Perros
    #------------------------------------------
#------------------------------------------
class Subsession(BaseSubsession):
    def creating_session(self):
        """Assign a treatment to each player at session creation."""
        # Cycle so treatments are balanced across players in live sessions.
        treat = itertools.cycle([1, 2, 3, 4, 5, 6])
        # 1: Full-Free, 2: Sticky-Free, 3: Blurry-Free, 4: Full-Cost, 5: Sticky-Cost, 6: Blurry-Cost
        for p in self.get_players():
            if 'treatment' in self.session.config:
                # demo mode: force every player into the configured treatment
                p.treat = self.session.config['treatment']
            else:
                # live experiment mode: round-robin over the six treatments
                p.treat = next(treat)
class Group(BaseGroup):
    # No group-level fields or behavior needed for this instructions app.
    pass
class Player(BasePlayer):
    """Per-player fields: assigned treatment plus comprehension-check
    questions (radio selects) for the FLUID instructions."""
    treat = models.IntegerField()  # Treatments from 1 to 6
    # Comprehension question: how is the player's group determined?
    given_group = models.PositiveIntegerField(
        choices=[
            [1, 'It is fixed and does not change'],
            [2, 'The computer changes it in each round'],
            [3, 'I can change it in each round'],
        ],
        widget=widgets.RadioSelect
    )
    # Comprehension question: how does the player's appearance evolve?
    appearance = models.PositiveIntegerField(
        choices=[
            [1, 'It is fixed and does not change'],
            [2, 'The computer changes it in each round'],
            [3, 'I can change it in each round by changing my group'],
        ],
        widget=widgets.RadioSelect
    )
    # Comprehension question: how is the player's label determined?
    label = models.PositiveIntegerField(
        choices=[
            [1, 'It is fixed and does not change'],
            [2, 'The computer changes it in each round'],
            [3, 'I can change it in each round'],
        ],
        widget=widgets.RadioSelect
    )
    # Comprehension questions about coordination payoffs (gain minus
    # Constants.link_cost).
    pay_coord = models.PositiveIntegerField(
        choices=[
            [1, 'I gain 6 and pay the cost of 2 = 4 points in total'],
            [2, 'I gain 4 and pay the cost of 2 = 2 points in total'],
            [3, 'I gain 0 and pay the cost of 2 = -2 points in total']
        ],
        widget=widgets.RadioSelect
    )
    pay_coord2 = models.PositiveIntegerField(
        choices=[
            [1, 'I gain 6 and pay the cost of 2 = 4 points in total'],
            [2, 'I gain 4 and pay the cost of 2 = 2 points in total'],
            [3, 'I gain 0 and pay the cost of 2 = -2 points in total']
        ],
        widget=widgets.RadioSelect
    )
    # Comprehension question: what do other players observe after a switch?
    information = models.PositiveIntegerField(
        choices=[
            [1, 'They can see the group I choose and my new appearance'],
            [2, 'They can see the group I choose and my appearance from Part {}'.format(Constants.part_fixed)],
            [3, 'They cannot see the group I choose only my appearance from Part {}'.format(Constants.part_fixed)],
        ],
        widget=widgets.RadioSelect
    )
    def vars_for_template(self):
        # Expose the participant's group names/labels (set in an earlier
        # app -- presumably the naming part; confirm) to the template.
        return {
            'circles_name': self.participant.vars['circles_name'],
            'triangles_name': self.participant.vars['triangles_name'],
            'circles_label': self.participant.vars['circles_label'],
            'triangles_label': self.participant.vars['triangles_label'],
            'names': len(Constants.names)
        }
|
[
"manumunozh@gmail.com"
] |
manumunozh@gmail.com
|
b1a7b240bbf7f17341177d6f03acfd50f21dbac8
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/arc094/B/4253154.py
|
6d7383b45734c25c5fe494a308375919ce98d92e
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
def solve(a, b):
    """Answer one (a, b) query; behavior identical to the original."""
    lo, hi = min(a, b), max(a, b)
    if lo == hi:
        return 2 * lo - 2
    prod = lo * hi
    # Start just above floor(sqrt(prod)) and walk down to the largest c
    # with c*c < prod (the +2 guards against sqrt rounding error).
    c = int(math.sqrt(prod)) + 2
    while c * c >= prod:
        c -= 1
    return 2 * c - 2 if c * (c + 1) >= prod else 2 * c - 1
# Read the number of queries, then answer each (a, b) pair on its own line.
Q = int(input())
for _ in range(Q):
    a, b = map(int, input().split())
    print(solve(a, b))
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
049e4a9ab11ab90ed899286b280e96eef25db4de
|
cadd27a5c72644fe87940e156e4f40f1131b9a57
|
/udemy/_internal.py
|
0cc13463a4fdad5b5c127e648080b4c565dc88a2
|
[
"MIT"
] |
permissive
|
mrbrazzi/udemy-dl
|
1ef576800bd01ed2724911144a1fd8bad70b18f9
|
0f0a3ff00167b3b4614d5afc0d4dc6461e46be97
|
refs/heads/master
| 2022-11-13T17:41:38.170496
| 2020-07-01T18:17:24
| 2020-07-01T18:17:24
| 274,795,490
| 1
| 0
|
MIT
| 2020-06-25T00:16:33
| 2020-06-25T00:16:32
| null |
UTF-8
|
Python
| false
| false
| 6,961
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Author : Nasir Khan (r0ot h3x49)
Github : https://github.com/r0oth3x49
License : MIT
Copyright (c) 2020 Nasir Khan (r0ot h3x49)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import time
from ._colorized import *
from ._extract import Udemy
from ._shared import (
UdemyCourse,
UdemyChapters,
UdemyLectures,
UdemyLectureStream,
UdemyLectureAssets,
UdemyLectureSubtitles
)
class InternUdemyCourse(UdemyCourse, Udemy):
    """Concrete course model: authenticates, extracts course metadata once,
    and materialises chapters as InternUdemyChapter objects."""
    def __init__(self, *args, **kwargs):
        self._info = ''  # raw extracted course dict; filled by _fetch_course()
        super(InternUdemyCourse, self).__init__(*args, **kwargs)
    def _fetch_course(self):
        """Log in (credentials or cookies), download course info, build the
        chapter list, then log out. No-op if the data was already fetched.
        Exits the process (sys.exit) when authentication fails."""
        if self._have_basic:
            return
        # Credentials and cookies are mutually exclusive login paths.
        if not self._cookies:
            auth = self._login(username=self._username, password=self._password)
        if self._cookies:
            auth = self._login(cookies=self._cookies)
        if auth.get('login') == 'successful':
            sys.stdout.write(fc + sd + "[" + fm + sb + "+" + fc + sd + "] : " + fg + sb + "Logged in successfully.\n")
            sys.stdout.write('\r' + fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Downloading course information .. \r")
            self._info = self._real_extract(self._url)
            time.sleep(1)
            sys.stdout.write('\r' + fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Downloaded course information .. (done)\r\n")
            self._id = self._info['course_id']
            self._title = self._info['course_title']
            self._chapters_count = self._info['total_chapters']
            self._total_lectures = self._info['total_lectures']
            self._chapters = [InternUdemyChapter(z) for z in self._info['chapters']]
            sys.stdout.write(fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Trying to logout now...\n")
            # Cookie-based sessions are not logged out (the cookie stays valid).
            if not self._cookies:
                self._logout()
            sys.stdout.write(fc + sd + "[" + fm + sb + "+" + fc + sd + "] : " + fg + sb + "Logged out successfully.\n")
            self._have_basic = True
        if auth.get('login') == 'failed':
            sys.stdout.write(fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sb + "Failed to login ..\n")
            sys.exit(0)
class InternUdemyChapter(UdemyChapters):
    """Chapter wrapper built from one 'chapters' entry of the extracted
    course dict; eagerly constructs its lecture wrappers."""
    def __init__(self, chapter):
        super(InternUdemyChapter, self).__init__()
        self._chapter_id = chapter['chapter_id']
        self._chapter_title = chapter['chapter_title']
        self._unsafe_title = chapter['unsafe_chapter']
        self._chapter_index = chapter['chapter_index']
        self._lectures_count = chapter.get('lectures_count', 0)
        # Only touch chapter['lectures'] when the count says there are any.
        self._lectures = [InternUdemyLecture(z) for z in chapter['lectures']] if self._lectures_count > 0 else []
class InternUdemyLecture(UdemyLectures):
    """Lecture wrapper; streams/assets/subtitles are built lazily by the
    _process_* hooks from the raw lecture dict kept in self._info."""
    def __init__(self, lectures):
        super(InternUdemyLecture, self).__init__()
        self._info = lectures
        self._lecture_id = self._info['lectures_id']
        self._lecture_title = self._info['lecture_title']
        self._unsafe_title = self._info['unsafe_lecture']
        self._lecture_index = self._info['lecture_index']
        self._subtitles_count = self._info.get('subtitle_count', 0)
        self._sources_count = self._info.get('sources_count', 0)
        self._assets_count = self._info.get('assets_count', 0)
        self._extension = self._info.get('extension')
        self._html_content = self._info.get('html_content')
        self._duration = self._info.get('duration')
        if self._duration:
            # Re-format a duration in seconds as MM:SS, or HH:MM:SS past an hour.
            duration = int(self._duration)
            (mins, secs) = divmod(duration, 60)
            (hours, mins) = divmod(mins, 60)
            if hours == 0:
                self._duration = "%02d:%02d" % (mins, secs)
            else:
                self._duration = "%02d:%02d:%02d" % (hours, mins, secs)
    def _process_streams(self):
        # Build one stream wrapper per source entry (empty when none exist).
        streams = [InternUdemyLectureStream(z, self) for z in self._info['sources']] if self._sources_count > 0 else []
        self._streams = streams
    def _process_assets(self):
        # Build one asset wrapper per asset entry (empty when none exist).
        assets = [InternUdemyLectureAssets(z, self) for z in self._info['assets']] if self._assets_count > 0 else []
        self._assets = assets
    def _process_subtitles(self):
        # Build one subtitle wrapper per subtitle entry (empty when none exist).
        subtitles = [InternUdemyLectureSubtitles(z, self) for z in self._info['subtitles']] if self._subtitles_count > 0 else []
        self._subtitles = subtitles
class InternUdemyLectureStream(UdemyLectureStream):
    """One downloadable video variant of a lecture (type/extension/size/url)."""
    def __init__(self, sources, parent):
        super(InternUdemyLectureStream, self).__init__(parent)
        self._mediatype = sources.get('type')
        self._extension = sources.get('extension')
        # Missing dimensions default to 0, yielding a '0x0' resolution string.
        height = sources.get('height', 0)
        width = sources.get('width', 0)
        self._resolution = '%sx%s' % (width, height)
        self._dimention = width, height  # NOTE: 'dimention' spelling is the inherited attribute name
        self._quality = self._resolution
        self._url = sources.get('download_url')
class InternUdemyLectureAssets(UdemyLectureAssets):
    """One supplementary lecture file; filename is prefixed with the
    zero-padded lecture index (e.g. '007 slides.pdf')."""
    def __init__(self, assets, parent):
        super(InternUdemyLectureAssets, self).__init__(parent)
        self._mediatype = assets.get('type')
        self._extension = assets.get('extension')
        self._filename = '{0:03d} {1!s}'.format(parent._lecture_index, assets.get('filename'))
        self._url = assets.get('download_url')
class InternUdemyLectureSubtitles(UdemyLectureSubtitles):
    """One subtitle track of a lecture (type/extension/language/url)."""
    def __init__(self, subtitles, parent):
        super(InternUdemyLectureSubtitles, self).__init__(parent)
        self._mediatype = subtitles.get('type')
        self._extension = subtitles.get('extension')
        self._language = subtitles.get('language')
        self._url = subtitles.get('download_url')
|
[
"r0oth3x49@gmail.com"
] |
r0oth3x49@gmail.com
|
6d9aa23aac7fc83f966804c5d25d3a1d9096f76b
|
81f9d88a560edb2b92997855c6445628cf0e6eaa
|
/homura/__init__.py
|
53bd57eca602723adbb53d18674e93be0451c1cd
|
[
"Apache-2.0"
] |
permissive
|
haiyan-he/homura
|
e1de6e0162a0a3d5c2f2ce142f551562dc30f4c3
|
2b98d4e0071b926233869e5396a02e70638d19f7
|
refs/heads/master
| 2023-02-06T18:25:15.557698
| 2020-10-01T07:54:23
| 2020-10-01T07:54:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
from .register import Registry
from .utils import TensorDataClass, TensorTuple, distributed_print, enable_accimage, get_args, get_environ, \
get_git_hash, get_global_rank, get_local_rank, get_num_nodes, get_world_size, if_is_master, init_distributed, \
is_accimage_available, is_distributed, is_distributed_available, is_faiss_available, is_master, set_deterministic, \
set_seed
Registry.import_modules('homura.vision')
|
[
"hataya@keio.jp"
] |
hataya@keio.jp
|
10682582ea1863fd922d5da0927d22778286f60e
|
38c35956be6343855914b1c58b8fbd2e40c6e615
|
/Grafos/2131.py
|
2dd6d299343e38504e64253ea933e1dd5ba17cd3
|
[] |
no_license
|
LucasBarbosaRocha/URI
|
b43e4f4a6b3beed935f24839001bea354411c4bd
|
2c9bcc13300a9f6243242e483c8f9ec3296a88ad
|
refs/heads/master
| 2020-06-25T05:06:51.297824
| 2019-08-22T04:50:11
| 2019-08-22T04:50:11
| 199,210,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,953
|
py
|
# -*- coding: utf-8 -*-
# Funcao que cria um grafo
def cria_grafo(lista_de_vs, lista_de_arestas):
    """Build a directed graph as an adjacency dict.

    lista_de_vs: iterable of vertices; lista_de_arestas: iterable of
    (origin, destination) pairs. Returns {vertex: [successors...]},
    with every listed vertex present even when it has no outgoing edge.
    """
    adjacencias = {vertice: [] for vertice in lista_de_vs}
    for origem, destino in lista_de_arestas:
        adjacencias[origem].append(destino)
    return adjacencias
# Busca em profundidade personalizada
def dfs_iterative(grafo, i, n, verticesValidos):
    """Iterative 2-colouring (bipartiteness) check over every component.

    grafo: adjacency dict; i: lowest vertex id to try as a start;
    n: highest vertex id; verticesValidos: vertices that appear in some
    edge. NOTE: verticesValidos is MUTATED — each vertex is removed after
    its adjacencies are expanded, so later outer iterations skip it.
    Returns False as soon as two adjacent vertices get the same colour,
    True when every component could be 2-coloured.
    """
    cores = [-1]*(n+1)  # -1 = uncoloured; valid colours are 0 and 1
    for j in range(i, n + 1):
        if (j in verticesValidos):
            stack = [j]
            cores[j] = 1  # seed colour for this component
            while stack:
                v = stack.pop()
                for adjacencia in grafo[v]:
                    if (cores[adjacencia] == -1): # neighbour gets the colour opposite to its parent
                        cores[adjacencia] = 1 - cores[v]
                        stack.append(adjacencia) # push the neighbour for later expansion
                    elif (cores[adjacencia] == cores[v]): # same colour on both ends -> not bipartite
                        return False
                verticesValidos.remove(v)
    return True
# Driver loop (Python 2: uses raw_input): reads instances of "n m" followed
# by m edges, and prints "sim"/"nao" per instance depending on whether the
# described graph is bipartite. Stops on any input error (EOF).
k = 1
while True:
    try:
        entrada = raw_input().split(" ")
        n = int(entrada[0])
        m = int(entrada[1])
        vertices = []
        verticesValidos = []  # only vertices that actually appear in an edge
        for i in range(1, n + 1):
            vertices.append(i)
        arestas = []
        grafo = []
        caminho = []
        totalArestas = 0
        print ("Instancia %d" %k)
        # Check whether the graph is bipartite.
        for i in range(m):
            entrada = raw_input().split(" ")
            v1 = int(entrada[0])
            v2 = int(entrada[1])
            if (verticesValidos == []):
                verticesValidos.append(v1)
                verticesValidos.append(v2)
            if (v1 not in verticesValidos):
                verticesValidos.append(v1)
            if (v2 not in verticesValidos):
                verticesValidos.append(v2)
            arestas.append((v1, v2))
            #arestas.append((v2, v1))
        grafo = cria_grafo(verticesValidos, arestas)
        # An edgeless graph is trivially bipartite.
        if (m == 0 or dfs_iterative(grafo, verticesValidos[0], n, verticesValidos) == True):
            print ("sim\n")
        else:
            print ("nao\n")
        k = k + 1
    except :
        break
|
[
"lucas.lb.rocha@gmail.com"
] |
lucas.lb.rocha@gmail.com
|
3b29475196e55b01c9672bfe50448f45d59c16f9
|
9ffd8754679f363f7c03fa2873dfd3b1f5af41a7
|
/UserRegistration/admin.py
|
90d6d157eb401162e4d94faa6e1e2727258d82ca
|
[] |
no_license
|
cmrajib/django_fashion_ecommerce
|
54848c7f3f1ede349bad77533647cd161d86c413
|
141e87ad77f4a4503e487de8ad360789a9e272f4
|
refs/heads/main
| 2023-02-21T12:31:07.457942
| 2021-01-19T17:58:31
| 2021-01-19T17:58:31
| 331,065,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
from django.contrib import admin
# Register your models here.
from UserRegistration.models import User, Coupon
class UserAdmin(admin.ModelAdmin):
    """Admin list configuration for User: both columns link to the change
    page, search covers name and phone, 10 rows per page."""
    list_display = ('email', 'full_name')
    list_display_links = ('email', 'full_name')
    # list_filter = ('user__email','full_name','city')
    # list_editable = ('is_featured',)
    search_fields =('full_name', 'phone')
    list_per_page = 10
# Register User with the custom admin; Coupon uses the default ModelAdmin.
admin.site.register(User, UserAdmin)
admin.site.register(Coupon)
|
[
"cmrajib@gmail.com"
] |
cmrajib@gmail.com
|
4049996983313e176aa4972cf259096df6168fe7
|
eee6dd18897d3118f41cb5e6f93f830e06fbfe2f
|
/venv/lib/python3.6/site-packages/scipy/sparse/spfuncs.py
|
a5aeb7d325475944cf4927075b955f7bbfa4a436
|
[] |
no_license
|
georgeosodo/ml
|
2148ecd192ce3d9750951715c9f2bfe041df056a
|
48fba92263e9295e9e14697ec00dca35c94d0af0
|
refs/heads/master
| 2020-03-14T11:39:58.475364
| 2018-04-30T13:13:01
| 2018-04-30T13:13:01
| 131,595,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,759
|
py
|
""" Functions that operate on sparse matrices
"""
__all__ = ['count_blocks','estimate_blocksize']
from .csr import isspmatrix_csr, csr_matrix
from .csc import isspmatrix_csc
from ._sparsetools import csr_count_blocks
def extract_diagonal(A):
    """Removed helper; call the matrix's own ``.diagonal()`` method instead."""
    raise NotImplementedError('use .diagonal() instead')
#def extract_diagonal(A):
# """extract_diagonal(A) returns the main diagonal of A."""
# #TODO extract k-th diagonal
# if isspmatrix_csr(A) or isspmatrix_csc(A):
# fn = getattr(sparsetools, A.format + "_diagonal")
# y = empty( min(A.shape), dtype=upcast(A.dtype) )
# fn(A.shape[0],A.shape[1],A.indptr,A.indices,A.data,y)
# return y
# elif isspmatrix_bsr(A):
# M,N = A.shape
# R,C = A.blocksize
# y = empty( min(M,N), dtype=upcast(A.dtype) )
# fn = sparsetools.bsr_diagonal(M//R, N//C, R, C, \
# A.indptr, A.indices, ravel(A.data), y)
# return y
# else:
# return extract_diagonal(csr_matrix(A))
def estimate_blocksize(A,efficiency=0.7):
    """Attempt to determine a good blocksize (r, c) for a sparse matrix.

    Returns a blocksize=(r,c) such that
        - A.nnz / A.tobsr( (r,c) ).nnz > efficiency
    Candidate sizes are limited to (1,1), (2,2), (3,3), (4,4) and (6,6);
    a size is only considered when it divides both matrix dimensions.
    """
    # Efficiency of size (r,c) here is nnz / (r*c*occupied_blocks):
    # the fraction of stored block entries that are actually nonzero.
    if not (isspmatrix_csr(A) or isspmatrix_csc(A)):
        A = csr_matrix(A)
    if A.nnz == 0:
        return (1,1)
    if not 0 < efficiency < 1.0:
        raise ValueError('efficiency must satisfy 0.0 < efficiency < 1.0')
    high_efficiency = (1.0 + efficiency) / 2.0
    nnz = float(A.nnz)
    M,N = A.shape
    if M % 2 == 0 and N % 2 == 0:
        e22 = nnz / (4 * count_blocks(A,(2,2)))
    else:
        e22 = 0.0
    if M % 3 == 0 and N % 3 == 0:
        e33 = nnz / (9 * count_blocks(A,(3,3)))
    else:
        e33 = 0.0
    # Only try (6,6) when both (2,2) and (3,3) are clearly efficient.
    if e22 > high_efficiency and e33 > high_efficiency:
        e66 = nnz / (36 * count_blocks(A,(6,6)))
        if e66 > efficiency:
            return (6,6)
        else:
            return (3,3)
    else:
        if M % 4 == 0 and N % 4 == 0:
            e44 = nnz / (16 * count_blocks(A,(4,4)))
        else:
            e44 = 0.0
        # Prefer the largest size that still meets the efficiency target.
        if e44 > efficiency:
            return (4,4)
        elif e33 > efficiency:
            return (3,3)
        elif e22 > efficiency:
            return (2,2)
        else:
            return (1,1)
def count_blocks(A,blocksize):
    """Count the occupied (r, c) blocks of the sparse matrix ``A``.

    ``blocksize`` is the pair (r, c); both must be >= 1. CSC input is
    counted via the transpose with a swapped blocksize; any other format
    is converted to CSR first.
    """
    r, c = blocksize
    if not (r >= 1 and c >= 1):
        raise ValueError('r and c must be positive')
    if isspmatrix_csc(A):
        return count_blocks(A.T, (c, r))
    if not isspmatrix_csr(A):
        return count_blocks(csr_matrix(A), blocksize)
    M, N = A.shape
    return csr_count_blocks(M, N, r, c, A.indptr, A.indices)
|
[
"georgeosodo2010@gmail.com"
] |
georgeosodo2010@gmail.com
|
621d2aad3eb42bb8dbd059d46d0a17ff7f170215
|
a8f6a8afd6b3609a947cafad5988d025454b4f9c
|
/datesFromLogs_Test.py
|
c15388862c6f16360d4caae0cd5764dee4ff4481
|
[] |
no_license
|
andreodendaal/100DaysOfCode
|
aede59a6e1f3f3ada30a1a534548939a7b9b375f
|
282bf21c2d75fcd562ae935fa23e41a7c4c0cb45
|
refs/heads/master
| 2020-03-20T17:21:44.649611
| 2019-02-27T17:22:56
| 2019-02-27T17:22:56
| 137,557,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 952
|
py
|
import unittest
from datesFromLogs_d2 import datetime, timedelta
from datesFromLogs_d2 import loglines, convert_to_datetime, time_between_shutdowns
class TestDatesFromLogs(unittest.TestCase):
    """Unit tests for log-timestamp parsing and the shutdown interval."""
    def test_convert_to_datetime(self):
        # Each log line carries an ISO-8601 timestamp as its second token.
        line1 = 'ERROR 2014-07-03T23:24:31 supybot Invalid user dictionary file'
        line2 = 'INFO 2015-10-03T10:12:51 supybot Shutdown initiated.'
        line3 = 'INFO 2016-09-03T02:11:22 supybot Shutdown complete.'
        self.assertEqual(convert_to_datetime(line1), datetime(2014, 7, 3, 23, 24, 31))
        self.assertEqual(convert_to_datetime(line2), datetime(2015, 10, 3, 10, 12, 51))
        self.assertEqual(convert_to_datetime(line3), datetime(2016, 9, 3, 2, 11, 22))
    def test_time_between_events(self):
        # The bundled loglines fixture spans a 3m31s shutdown window.
        diff = time_between_shutdowns(loglines)
        self.assertEqual(type(diff), timedelta)
        self.assertEqual(str(diff), '0:03:31')
if __name__ == '__main__':
    unittest.main()
|
[
"aodendaal.direct@gmail.com"
] |
aodendaal.direct@gmail.com
|
aa8d5d425f8c8c07e9aa8bd8b01d8c38944a0232
|
295ecf4f254c42e9201657ef0a13ec2c68c40c9b
|
/buy/forms.py
|
c80566d1a697136ed495d48bc28f1acb92a3c021
|
[] |
no_license
|
zwolf21/StockAdmin-pre2
|
0236061284a6fe8801591608591d21129d4ea7c0
|
b21d069ff215c17ce3bca040ecf9b8f48b452ed4
|
refs/heads/master
| 2021-05-01T09:28:59.818469
| 2016-11-30T17:33:30
| 2016-11-30T17:33:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
from django import forms
from .models import BuyItem, Buy
from datetime import date
class CreateBuyForm(forms.ModelForm):
    """Form for creating a Buy record.

    The date field defaults to today's date and is rendered read-only
    (and taken out of the tab order) so users cannot edit it.
    """
    # BUG FIX: pass the callable ``date.today`` rather than ``date.today()``.
    # Calling it here evaluates once at import time, so a long-running
    # server process would keep pre-filling a stale date.
    date = forms.DateField(initial=date.today, widget=forms.TextInput(attrs={'tabindex':'-1','readonly':'readonly'}))
    class Meta:
        model = Buy
        fields = ['date']
class BuyItemAddForm(forms.ModelForm):
    """Form for adding one BuyItem; only the amount is user-editable."""
    # name = forms.CharField(label='약품명', required=False)
    amount = forms.IntegerField(label='수량', required=False, help_text='위아래 방향키로 수량조절')
    class Meta:
        model = BuyItem
        fields = ['amount']
        help_texts = {'amount':('위아래 방향키로 수량 조절')}
|
[
"pbr112@naver.com"
] |
pbr112@naver.com
|
c6ab3e4068fda9c8202f9017c37654175a68e019
|
393a387cdb286cde75b4b7d760625d5851b6b080
|
/Sorting items from user in alphabetical and reverse.py
|
90a60ccf36cb5d37082e64d25c13965862512f11
|
[] |
no_license
|
nami-h/Python
|
b57f12ae48d5bc17a3de72ec7c5abb5622ba0cd2
|
7b067950da29df705237405742564d2f127f1446
|
refs/heads/master
| 2021-06-27T16:00:10.113762
| 2020-09-22T19:59:05
| 2020-09-22T19:59:05
| 136,550,054
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
# Collect animal names from the user, merge with a preset list, then show
# the combined list sorted alphabetically and in reverse order.
lis=[]
num=int(input("How many animals do you want to enter? "))
for add in range(num):
    add=input("Enter animal: ")  # NOTE: rebinds the loop variable; index is unused
    lis.append(add)
animals=['horse','cat','mouse']
s=lis+animals
print("Our list consists of: ", s)
s.sort()  # in-place alphabetical sort
print("Alphabetically ordered: ", s)
s.reverse()  # reverse of the sorted order
print("Reverse ordered: ",s)
|
[
"noreply@github.com"
] |
nami-h.noreply@github.com
|
75abc5b6b01a6dcbb9c8d615f21e672899c50936
|
f9e3a0fb511470561d3d94bc984dafaee06000cb
|
/PP4E-Examples-1.4/Examples/PP4E/Preview/person_start.py
|
c1ba77cd5ea9c9f2d7400fccde6645dfcf752e38
|
[] |
no_license
|
Sorath93/Programming-Python-book
|
359b6fff4e17b44b9842662f484bbafb490cfd3d
|
ebe4c93e265edd4ae135491bd2f96904d08a911c
|
refs/heads/master
| 2022-12-03T01:49:07.815439
| 2020-08-16T22:19:38
| 2020-08-16T22:19:38
| 287,775,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
class Person:
    """Record of one person: name, age, and optional pay (default 0) and
    job (default None)."""
    def __init__(self, name, age, pay=0, job=None):
        # Plain attribute storage; no validation is performed.
        self.name, self.age = name, age
        self.pay, self.job = pay, job
if __name__ == '__main__':
    # Self-test: build two records and exercise attribute access/mutation.
    bob = Person('Bob Smith', 42, 30000, 'software')
    sue = Person('Sue Jones', 45, 40000, 'hardware')
    print(bob.name, sue.pay)
    print(bob.name.split()[-1])  # last name
    sue.pay *= 1.10  # 10% raise; note pay becomes a float
    print(sue.pay)
|
[
"Sorath.Soomro@isode.com"
] |
Sorath.Soomro@isode.com
|
a52bc78e9d3b3e11b7cddbd0d97869f5a2a6bec2
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/x12/5040/828005040.py
|
4a6d507aea18f617baa5083c1e25b5d6ed822f7a
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 823
|
py
|
from bots.botsconfig import *
from records005040 import recorddefs
# X12 grammar for transaction set 828, version 005040, functional group DA.
syntax = {
    'version': '00504',
    'functionalgroup': 'DA',
}
# Segment layout: ST header with optional name/address/reference segments,
# one or more DAD loops (each with optional NM1/N2-N4/REF/PER), then the
# mandatory CTT total, optional AMT, and SE trailer.
structure = [
    {ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
        {ID: 'BAU', MIN: 1, MAX: 1},
        {ID: 'N1', MIN: 0, MAX: 1},
        {ID: 'N2', MIN: 0, MAX: 99999},
        {ID: 'N3', MIN: 0, MAX: 99999},
        {ID: 'N4', MIN: 0, MAX: 1},
        {ID: 'REF', MIN: 0, MAX: 99999},
        {ID: 'PER', MIN: 0, MAX: 99999},
        {ID: 'DAD', MIN: 1, MAX: 99999, LEVEL: [
            {ID: 'NM1', MIN: 0, MAX: 1},
            {ID: 'N2', MIN: 0, MAX: 99999},
            {ID: 'N3', MIN: 0, MAX: 99999},
            {ID: 'N4', MIN: 0, MAX: 1},
            {ID: 'REF', MIN: 0, MAX: 99999},
            {ID: 'PER', MIN: 0, MAX: 99999},
        ]},
        {ID: 'CTT', MIN: 1, MAX: 1},
        {ID: 'AMT', MIN: 0, MAX: 1},
        {ID: 'SE', MIN: 1, MAX: 1},
    ]}
]
|
[
"doug.vanhorn@tagglogistics.com"
] |
doug.vanhorn@tagglogistics.com
|
2940503764898b98082340170692f4e10443826b
|
8e59a43de9d427865c5d67fef39e9a50e44f07ce
|
/ppocr/modeling/heads/rec_multi_head.py
|
0b4fa939eecad15c79f5e37384944720b1879205
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleOCR
|
af87db8a804b9a4f4eac8a0b2faf80d1dd71633a
|
15963b0d242867a4cc4d76445626dc8965509b2f
|
refs/heads/release/2.7
| 2023-09-01T04:53:37.561932
| 2023-08-30T02:22:15
| 2023-08-30T02:22:15
| 262,296,122
| 34,195
| 7,338
|
Apache-2.0
| 2023-09-14T06:08:11
| 2020-05-08T10:38:16
|
Python
|
UTF-8
|
Python
| false
| false
| 4,301
|
py
|
# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from ppocr.modeling.necks.rnn import Im2Seq, EncoderWithRNN, EncoderWithFC, SequenceEncoder, EncoderWithSVTR
from .rec_ctc_head import CTCHead
from .rec_sar_head import SARHead
from .rec_nrtr_head import Transformer
class FCTranspose(nn.Layer):
    """Swap the last two axes of a 3-D tensor and, unless only_transpose,
    feed the result through a bias-free linear projection
    (in_channels -> out_channels)."""
    def __init__(self, in_channels, out_channels, only_transpose=False):
        super().__init__()
        self.only_transpose = only_transpose
        # The linear layer is only created when it will actually be used.
        if not self.only_transpose:
            self.fc = nn.Linear(in_channels, out_channels, bias_attr=False)
    def forward(self, x):
        # transpose([0, 2, 1]) exchanges the channel and sequence axes.
        if self.only_transpose:
            return x.transpose([0, 2, 1])
        else:
            return self.fc(x.transpose([0, 2, 1]))
class MultiHead(nn.Layer):
    """Multi-branch recognition head: always builds a CTC branch, plus a
    guided-training branch (SAR or NRTR) selected by the ``head_list``
    config. In eval mode only the CTC output is returned."""
    def __init__(self, in_channels, out_channels_list, **kwargs):
        super().__init__()
        # head_list comes from the (trusted) YAML config; each entry maps a
        # head class name to its constructor kwargs.
        self.head_list = kwargs.pop('head_list')
        self.gtc_head = 'sar'  # replaced by a Transformer when NRTRHead is configured
        assert len(self.head_list) >= 2
        for idx, head_name in enumerate(self.head_list):
            name = list(head_name)[0]
            if name == 'SARHead':
                # sar head
                # NOTE: eval(name) resolves the head class from this module's scope.
                sar_args = self.head_list[idx][name]
                self.sar_head = eval(name)(in_channels=in_channels, \
                    out_channels=out_channels_list['SARLabelDecode'], **sar_args)
            elif name == 'NRTRHead':
                gtc_args = self.head_list[idx][name]
                max_text_length = gtc_args.get('max_text_length', 25)
                nrtr_dim = gtc_args.get('nrtr_dim', 256)
                num_decoder_layers = gtc_args.get('num_decoder_layers', 4)
                # Flatten spatial dims then project to the transformer width.
                self.before_gtc = nn.Sequential(
                    nn.Flatten(2), FCTranspose(in_channels, nrtr_dim))
                self.gtc_head = Transformer(
                    d_model=nrtr_dim,
                    nhead=nrtr_dim // 32,
                    num_encoder_layers=-1,
                    beam_size=-1,
                    num_decoder_layers=num_decoder_layers,
                    max_len=max_text_length,
                    dim_feedforward=nrtr_dim * 4,
                    out_channels=out_channels_list['NRTRLabelDecode'])
            elif name == 'CTCHead':
                # ctc neck
                self.encoder_reshape = Im2Seq(in_channels)
                neck_args = self.head_list[idx][name]['Neck']
                encoder_type = neck_args.pop('name')
                self.ctc_encoder = SequenceEncoder(in_channels=in_channels, \
                    encoder_type=encoder_type, **neck_args)
                # ctc head
                head_args = self.head_list[idx][name]['Head']
                self.ctc_head = eval(name)(in_channels=self.ctc_encoder.out_channels, \
                    out_channels=out_channels_list['CTCLabelDecode'], **head_args)
            else:
                raise NotImplementedError(
                    '{} is not supported in MultiHead yet'.format(name))
    def forward(self, x, targets=None):
        """Run the CTC branch always; in training mode also run the guided
        branch (SAR or NRTR) and return a dict of all outputs."""
        ctc_encoder = self.ctc_encoder(x)
        ctc_out = self.ctc_head(ctc_encoder, targets)
        head_out = dict()
        head_out['ctc'] = ctc_out
        head_out['ctc_neck'] = ctc_encoder
        # eval mode
        if not self.training:
            return ctc_out
        # targets[0] is consumed by the CTC branch; the rest feed the guided head.
        if self.gtc_head == 'sar':
            sar_out = self.sar_head(x, targets[1:])
            head_out['sar'] = sar_out
        else:
            gtc_out = self.gtc_head(self.before_gtc(x), targets[1:])
            head_out['nrtr'] = gtc_out
        return head_out
|
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
47c310bae1e4abdaa6b75569a117b14e0647509e
|
a53998e56ee06a96d59d97b2601fd6ec1e4124d7
|
/基础课/jichu/day16/seek.py
|
9bc56e0f93452a501443ed06d36a8a5bd659e588
|
[] |
no_license
|
zh-en520/aid1901
|
f0ec0ec54e3fd616a2a85883da16670f34d4f873
|
a56f82d0ea60b2395deacc57c4bdf3b6bc73bd2e
|
refs/heads/master
| 2020-06-28T21:16:22.259665
| 2019-08-03T07:09:29
| 2019-08-03T07:09:29
| 200,344,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
# Demonstrate tell()/seek()/read() on a 20-byte binary file.
# A context manager guarantees the file is closed (the original leaked it).
with open('20bytes.txt', 'rb') as fr:
    print('当前读写位置是:', fr.tell())  # 0 right after opening
    b = fr.read(2)  # BUG FIX: was fr.reed(2), an AttributeError
    print(b)  # b'AB'
    print('当前读写位置是:', fr.tell())  # BUG FIX: was fe.tell(), a NameError; prints 2
    # Read the five bytes 'abcde'.
    fr.seek(5, 0)  # absolute offset 5 from the start
    # fr.seek(3, 1)   # equivalent: +3 relative to the current position (2)
    # fr.seek(-15, 2) # equivalent: -15 relative to the end of the 20-byte file
    b = fr.read(5)  # b'abcde'
    print(b)
|
[
"zh_en520@163.com"
] |
zh_en520@163.com
|
83c7d94c9223617493fa9162a58e518ded6fbd10
|
551b75f52d28c0b5c8944d808a361470e2602654
|
/huaweicloud-sdk-cloudide/huaweicloudsdkcloudide/v2/model/show_price_response.py
|
437f28d12f863bbca7d3ec3c11acd04386c74027
|
[
"Apache-2.0"
] |
permissive
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
9d6597ce8ab666a9a297b3d936aeb85c55cf5877
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
refs/heads/master
| 2023-05-08T21:32:31.920300
| 2021-05-26T08:54:18
| 2021-05-26T08:54:18
| 370,898,764
| 0
| 0
|
NOASSERTION
| 2021-05-26T03:50:07
| 2021-05-26T03:50:07
| null |
UTF-8
|
Python
| false
| false
| 3,513
|
py
|
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ShowPriceResponse(SdkResponse):
    """Generated SDK response model for the ShowPrice API.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names listed here are masked as "****" by to_dict().
    sensitive_list = []
    openapi_types = {
        'prices': 'list[ResourcePrice]',
        'status': 'str'
    }
    attribute_map = {
        'prices': 'prices',
        'status': 'status'
    }
    def __init__(self, prices=None, status=None):
        """ShowPriceResponse - a model defined in huaweicloud sdk"""
        super(ShowPriceResponse, self).__init__()
        self._prices = None
        self._status = None
        self.discriminator = None
        if prices is not None:
            self.prices = prices
        if status is not None:
            self.status = status
    @property
    def prices(self):
        """Gets the prices of this ShowPriceResponse.
        技术栈价格列表
        :return: The prices of this ShowPriceResponse.
        :rtype: list[ResourcePrice]
        """
        return self._prices
    @prices.setter
    def prices(self, prices):
        """Sets the prices of this ShowPriceResponse.
        技术栈价格列表
        :param prices: The prices of this ShowPriceResponse.
        :type: list[ResourcePrice]
        """
        self._prices = prices
    @property
    def status(self):
        """Gets the status of this ShowPriceResponse.
        状态
        :return: The status of this ShowPriceResponse.
        :rtype: str
        """
        return self._status
    @status.setter
    def status(self, status):
        """Sets the status of this ShowPriceResponse.
        状态
        :param status: The status of this ShowPriceResponse.
        :type: str
        """
        self._status = status
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialise model elements inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Plain value; mask it when listed as sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ShowPriceResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
4a538cff9ad6cd0890340bd264318ca2cc7dc8c3
|
cc9a0d5608b2209b02591ceace0a7416823a9de5
|
/config/settings/local.py
|
cc31fc60988cb0f8692e336882a572a81a46a794
|
[
"MIT"
] |
permissive
|
morwen1/hack_your_body
|
240838e75dd4447c944d47d37635d2064d4210fd
|
d4156d4fbe2dd4123d5b5bceef451803a50a39f8
|
refs/heads/master
| 2020-11-24T01:55:46.323849
| 2019-12-15T18:15:51
| 2019-12-15T18:15:51
| 226,505,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,759
|
py
|
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="EGR9jaMwa7cjRCcM2wIxqFPD2RqJ6yIEAiL7KlbEUKIPVjjPcL9ZMHgprAJiT2T7",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["localhost", "0.0.0.0", "127.0.0.1"]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = "localhost"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
#INSTALLED_APPS += ["debug_toolbar"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
#MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
"""
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}"""
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"]
if env("USE_DOCKER") == "yes":
import socket
hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + "1" for ip in ips]
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ["django_extensions"] # noqa F405
# Celery
# ------------------------------------------------------------------------------
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-eager-propagates
CELERY_TASK_EAGER_PROPAGATES = True
# Your stuff...
# ------------------------------------------------------------------------------
|
[
"morwen901@gmail.com"
] |
morwen901@gmail.com
|
a00da9b3a8568ca40cac0d1ea67083ce9ef97c43
|
9af35b0e0c0ed4b102c61c563d7c7647a758bb72
|
/braindecode/datautil/signal_target.py
|
ebb6280370c5f4026abc132b5482426657926cdc
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
TonioBall/braindecode
|
7c0a5217c944e8718d74fd9763b3c609252c9feb
|
d5b8d87d959c96ea8422e21099e1ef4b71b9d05a
|
refs/heads/master
| 2020-12-13T08:56:34.939109
| 2020-01-17T13:27:30
| 2020-01-17T13:27:30
| 234,367,901
| 0
| 0
|
BSD-3-Clause
| 2020-01-16T16:56:45
| 2020-01-16T16:56:44
| null |
UTF-8
|
Python
| false
| false
| 911
|
py
|
class SignalAndTarget(object):
    """Container pairing per-trial input signals with their labels.

    Parameters
    ----------
    X: 3darray or list of 2darrays
        The input signal per trial.
    y: 1darray or list
        Labels for each trial.
    """
    def __init__(self, X, y):
        # There must be exactly one label per trial.
        assert len(X) == len(y)
        self.X, self.y = X, y
def apply_to_X_y(fn, *sets):
    """
    Apply a function to all `X` and `y` attributes of all given sets.
    Applies function to list of X arrays and to list of y arrays separately.
    Parameters
    ----------
    fn: function
        Function to apply; receives one positional argument per given set.
    sets: :class:`.SignalAndTarget` objects
    Returns
    -------
    result_set: :class:`.SignalAndTarget`
        Dataset with X and y as the result of the
        application of the function.
    """
    # fn is called twice: once over all X's, once over all y's.
    X = fn(*[s.X for s in sets])
    y = fn(*[s.y for s in sets])
    return SignalAndTarget(X, y)
|
[
"robintibor@gmail.com"
] |
robintibor@gmail.com
|
9f7cd33b09083b7ca32ab65d512beb3d76667dc4
|
525bdfe2c7d33c901598a501c145df94a3e162b0
|
/subject6_graphs/text.py
|
e255a01bf2155d3b55fd0ba8180d1b331aa5b94d
|
[] |
no_license
|
davendiy/ads_course2
|
f0a52108f1cab8619b2e6e2c6c4383a1a4615c15
|
e44bf2b535b34bc31fb323c20901a77b0b3072f2
|
refs/heads/master
| 2020-04-06T09:37:12.983564
| 2019-05-09T10:28:22
| 2019-05-09T10:28:22
| 157,349,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,980
|
py
|
#!/usr/bin/env python3
# -*-encoding: utf-8-*-
# created: 05.12.18
# by David Zashkolny
# 2 course, comp math
# Taras Shevchenko National University of Kyiv
# email: davendiy@gmail.com
# For each test case: read an n x m maze with start 'S', exit 'E', walls of
# coloured doors (letters R/G/B/Y) and the four key costs. Try every key
# subset in increasing total cost and BFS from S; print the cheapest cost
# that reaches E, or 'Sleep' when no subset opens a path.
tasks = int(input())
for mini_task in range(tasks):
    s = input().split()
    n, m = int(s[0]), int(s[1])
    l = []
    x, y = -1, -1
    xe, ye = -1, -1
    cost = list(map(lambda x: int(x), input().split()))
    # Read the maze grid, remembering the positions of 'S' and 'E'.
    for i in range(n):
        buff = []
        k = input()
        if x == -1:
            y, x = i, k.find('S')
        if xe == -1:
            ye, xe = i, k.find('E')
        for j in k:
            buff.append(j)
        l.append(buff)
    # All 16 key subsets with their combined prices, cheapest first.
    keys = [['', 0], ['R', cost[0]], ['G', cost[1]], ['B', cost[2]], ['Y', cost[3]], ['RG', cost[0] + cost[1]],
            ['RB', cost[0] + cost[2]], ['RY', cost[0] + cost[3]], ['GB', cost[1] + cost[2]], ['GY', cost[1] + cost[3]],
            ['BY', cost[2] + cost[3]], ['RGB', cost[0] + cost[1] + cost[2]], ['RGY', cost[0] + cost[1] + cost[3]],
            ['RBY', cost[0] + cost[2] + cost[3]], ['GBY', cost[1] + cost[2] + cost[3]], ['RGBY', sum(cost)]]
    keys.sort(key=lambda x: x[1])
    way = [False, '']  # [reached_exit, cost_of_winning_key_subset]
    for key in keys:
        # hl holds BFS distances; -1 marks an unvisited cell.
        hl = [[-1 for i in range(m)] for j in range(n)]
        hl[y][x] = 1
        indexes = [[y, x]]
        # Level-by-level BFS; a cell is passable if it is open ('.'),
        # its door letter is in the key subset, or it is the exit 'E'.
        for i in range(n * m + 1):
            buff = []
            for j in indexes:
                if 0 <= j[1] + 1 < m and (l[j[0]][j[1] + 1] == '.' or key[0].find(l[j[0]][j[1] + 1]) != -1 or
                                          l[j[0]][j[1] + 1] == 'E') and hl[j[0]][j[1] + 1] == -1:
                    hl[j[0]][j[1] + 1] = hl[j[0]][j[1]] + 1
                    buff.append([j[0], j[1] + 1])
                if 0 <= j[1] - 1 < m and (l[j[0]][j[1] - 1] == '.' or key[0].find(l[j[0]][j[1] - 1]) != -1 or
                                          l[j[0]][j[1] - 1] == 'E') and hl[j[0]][
                    j[1] - 1] == -1:
                    hl[j[0]][j[1] - 1] = hl[j[0]][j[1]] + 1
                    buff.append([j[0], j[1] - 1])
                if 0 <= j[0] + 1 < n and (l[j[0] + 1][j[1]] == '.' or key[0].find(l[j[0] + 1][j[1]]) != -1 or
                                          l[j[0] + 1][j[1]] == 'E') and hl[j[0] + 1][
                    j[1]] == -1:
                    hl[j[0] + 1][j[1]] = hl[j[0]][j[1]] + 1
                    buff.append([j[0] + 1, j[1]])
                if 0 <= j[0] - 1 < n and (l[j[0] - 1][j[1]] == '.' or key[0].find(l[j[0] - 1][j[1]]) != -1 or
                                          l[j[0] - 1][j[1]] == 'E') and hl[j[0] - 1][j[1]] == -1:
                    hl[j[0] - 1][j[1]] = hl[j[0]][j[1]] + 1
                    buff.append([j[0] - 1, j[1]])
                if j == [ye, xe]:
                    # Exit expanded: this (cheapest remaining) subset wins.
                    way = [True, key[1]]
                    break
            indexes = buff
        if way[0]:
            break
        if way[0]:
            break
    if way[0]:
        print(way[1])
    else:
        print('Sleep')
|
[
"davendiy@gmail.com"
] |
davendiy@gmail.com
|
cd6e8276b89ab78c02389cbb84c51b5ba844fbdb
|
421d58c6b93b81e0724f8f4576119300eb344252
|
/influencers/users/migrations/0005_auto_20181111_1505.py
|
1d7c2ca75db84615820701ef82599ab6c705746f
|
[] |
no_license
|
momen/influencers
|
7728228c92a552bdff9ae62f85986ad03bce186e
|
f9c76cfc2970440112967f9579dc31f77063cb25
|
refs/heads/master
| 2020-06-03T22:20:03.881411
| 2019-06-15T07:48:43
| 2019-06-15T07:48:43
| 191,754,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
# Generated by Django 2.1.2 on 2018-11-11 15:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: flip User.is_active default to False so new
    accounts stay inactive until explicitly activated."""
    dependencies = [
        ('users', '0004_user_is_removed'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='is_active',
            field=models.BooleanField(default=False, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active'),
        ),
    ]
|
[
"momennegm@gmail.com"
] |
momennegm@gmail.com
|
792451ae4deffa9b1fcfbae36e7e0397b0f3d802
|
cf4f3c181dc04c4e698b53c3bb5dd5373b0cc1f4
|
/meridian/tst/acupoints/test_zusanli213.py
|
f53e9b7310099603a6d346caed38546241f59172
|
[
"Apache-2.0"
] |
permissive
|
sinotradition/meridian
|
da3bba6fe42d3f91397bdf54520b3085f7c3bf1d
|
8c6c1762b204b72346be4bbfb74dedd792ae3024
|
refs/heads/master
| 2021-01-10T03:20:18.367965
| 2015-12-14T14:58:35
| 2015-12-14T14:58:35
| 46,456,260
| 5
| 3
| null | 2015-11-29T15:00:20
| 2015-11-19T00:21:00
|
Python
|
UTF-8
|
Python
| false
| false
| 301
|
py
|
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
import unittest
from meridian.acupoints import zusanli213
class TestZusanli213Functions(unittest.TestCase):
    """Placeholder test suite for the zusanli213 acupoint module."""
    def setUp(self):
        # No fixtures needed yet.
        pass
    def test_xxx(self):
        # TODO: replace with real assertions against zusanli213.
        pass
if __name__ == '__main__':
    unittest.main()
|
[
"sinotradition@gmail.com"
] |
sinotradition@gmail.com
|
78114eb9b8af0d0f91311f5efe983687ab814067
|
71f00ed87cd980bb2f92c08b085c5abe40a317fb
|
/Data/GoogleCloud/google-cloud-sdk/lib/surface/ai_platform/operations/describe.py
|
06aa812cb986b22a941e19bab4929cc2330eda88
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
factoryofthesun/Rao-NLP
|
2bd8269a8eed1cb352c14c8fde88e3111ccca088
|
87f9723f5ee51bd21310d58c3425a2a7271ec3c5
|
refs/heads/master
| 2023-04-18T08:54:08.370155
| 2020-06-09T23:24:07
| 2020-06-09T23:24:07
| 248,070,291
| 0
| 1
| null | 2021-04-30T21:13:04
| 2020-03-17T20:49:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,528
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ai-platform jobs describe command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.ml_engine import operations
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.ml_engine import endpoint_util
from googlecloudsdk.command_lib.ml_engine import flags
from googlecloudsdk.command_lib.ml_engine import operations_util
def _AddDescribeArgs(parser):
    """Register the operation-name and region flags for the Describe command."""
    flags.OPERATION_NAME.AddToParser(parser)
    flags.GetRegionArg('operation').AddToParser(parser)
class Describe(base.DescribeCommand):
    """Describe an AI Platform operation."""
    @staticmethod
    def Args(parser):
        # Called by the gcloud framework to declare this command's flags.
        _AddDescribeArgs(parser)
    def Run(self, args):
        # Point the ML Engine client at the regional endpoint before the lookup.
        with endpoint_util.MlEndpointOverrides(region=args.region):
            client = operations.OperationsClient()
            return operations_util.Describe(client, args.operation)
|
[
"guanzhi97@gmail.com"
] |
guanzhi97@gmail.com
|
1b661917f1bbf1889691f540f74bfadc8996f42a
|
244189d49a3967b4b002af73f40ca8e8064c4771
|
/modules/auxiliary/scanner/http/octopusdeploy_login.rb
|
e95f4eb8c6b5e148702da83e0b31045c7dcf1249
|
[
"MIT"
] |
permissive
|
darkcode357/thg-framework
|
7540609fb79619bdc12bd98664976d51c79816aa
|
c1c3bd748aac85a8c75e52486ae608981a69d93a
|
refs/heads/master
| 2023-03-01T05:06:51.399919
| 2021-06-01T14:00:32
| 2021-06-01T14:00:32
| 262,925,227
| 11
| 6
|
NOASSERTION
| 2023-02-10T23:11:02
| 2020-05-11T03:04:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,528
|
rb
|
##
# This module requires Metasploit: https://metasploit.com/download
# Current source: https://github.com/rapid7/metasploit-framework
##
require 'metasploit/framework/credential_collection'
require 'metasploit/framework/login_scanner/octopusdeploy'
# Auxiliary scanner: brute-forces Octopus Deploy logins over HTTP.
class MetasploitModule < Msf::Auxiliary
  include Msf::Exploit::Remote::HttpClient
  include Msf::Auxiliary::Report
  include Msf::Auxiliary::AuthBrute
  include Msf::Auxiliary::Scanner
  def initialize
    super(
      'Name' => 'Octopus Deploy Login Utility',
      'Description' => %q{
        This module simply attempts to login to an Octopus Deploy server using a specific
        username and password. It has been confirmed to work on version 3.4.4
      },
      'Author' => [ 'James Otten <jamesotten1[at]gmail.com>' ],
      'License' => MSF_LICENSE
    )
    register_options(
      [
        Opt::RPORT(80),
        OptString.new('TARGETURI', [true, 'URI for login. Default is /api/users/login', '/api/users/login'])
      ])
    # Password spraying is not supported by this scanner.
    deregister_options('PASSWORD_SPRAY')
  end
  # Runs once per target host: builds the credential set from the datastore,
  # drives the login scanner, and records successes/failures in the database.
  def run_host(ip)
    cred_collection = Metasploit::Framework::CredentialCollection.new(
      blank_passwords: datastore['BLANK_PASSWORDS'],
      pass_file: datastore['PASS_FILE'],
      password: datastore['PASSWORD'],
      user_file: datastore['USER_FILE'],
      userpass_file: datastore['USERPASS_FILE'],
      username: datastore['USERNAME'],
      user_as_pass: datastore['USER_AS_PASS']
    )
    scanner = Metasploit::Framework::LoginScanner::OctopusDeploy.new(
      configure_http_login_scanner(
        cred_details: cred_collection,
        stop_on_success: datastore['STOP_ON_SUCCESS'],
        bruteforce_speed: datastore['BRUTEFORCE_SPEED'],
        connection_timeout: 10,
        http_username: datastore['HttpUsername'],
        http_password: datastore['HttpPassword'],
        uri: datastore['TARGETURI']
      )
    )
    scanner.scan! do |result|
      credential_data = result.to_h
      credential_data.merge!(
        module_fullname: fullname,
        workspace_id: myworkspace_id
      )
      if result.success?
        # Store the working credential and its login association.
        credential_core = create_credential(credential_data)
        credential_data[:core] = credential_core
        create_credential_login(credential_data)
        print_good "#{ip}:#{rport} - Login Successful: #{result.credential}"
      else
        invalidate_login(credential_data)
        vprint_error "#{ip}:#{rport} - LOGIN FAILED: #{result.credential} (#{result.status})"
      end
    end
  end
end
|
[
"darkocde357@gmail.com"
] |
darkocde357@gmail.com
|
adb6427317dfc161210edab8159cb6ef4ec06f21
|
23b44edcd663eb60d4deee64ced5a5b27ee3b7d2
|
/thermosteam/chemicals/phase_change.py
|
ef5a32237d3cf1e9db68f454ccd5c957c0c7a959
|
[
"LicenseRef-scancode-unknown-license-reference",
"NCSA",
"MIT"
] |
permissive
|
sarangbhagwat/thermosteam
|
7b13c6c3146fe2fc378b453fe3c732dc7397ea0c
|
710ec22b17c257a742300bf172fd3121852abf98
|
refs/heads/master
| 2022-12-13T03:37:49.251727
| 2020-09-14T17:24:30
| 2020-09-14T17:24:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,344
|
py
|
# -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020, Yoel Cortes-Pena <yoelcortes@gmail.com>
#
# This module extends the phase_change module from the chemicals's library:
# https://github.com/CalebBell/chemicals
# Copyright (C) 2020 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
#
# This module is under a dual license:
# 1. The UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
#
# 2. The MIT open-source license. See
# https://github.com/CalebBell/chemicals/blob/master/LICENSE.txt for details.
from chemicals import phase_change as pc
import numpy as np
from ..base import InterpolatedTDependentModel, TDependentHandleBuilder, functor
from .. import functional as fn
from chemicals.dippr import EQ106
from .data import (phase_change_data_Perrys2_150,
phase_change_data_VDI_PPDS_4,
VDI_saturation_dict,
phase_change_data_Alibakhshi_Cs,
lookup_VDI_tabular_data,
Hvap_data_CRC,
Hvap_data_Gharagheizi,
)
### Enthalpy of Vaporization at T
# Wrap the chemicals.phase_change correlations as 'Hvap' functors so they can
# be registered as T-dependent models in the handle builder below.
Clapeyron = functor(pc.Clapeyron, 'Hvap')
Pitzer = functor(pc.Pitzer, 'Hvap')
SMK = functor(pc.SMK, 'Hvap')
MK = functor(pc.MK, 'Hvap')
Velasco = functor(pc.Velasco, 'Hvap')
Watson = functor(pc.Watson, 'Hvap')
Alibakhshi = functor(pc.Alibakhshi, 'Hvap')
PPDS12 = functor(pc.PPDS12, 'Hvap')
def Clapeyron_hook(self, T, kwargs):
    """Pre-process Clapeyron arguments before evaluation.

    Evaluates a callable Psat at T, and if a molar-volume handle 'V' is
    present, derives the compressibility change dZ from the gas/liquid
    volumes instead of passing V through.
    """
    kwargs = kwargs.copy()
    Psat = kwargs['Psat']
    if callable(Psat): kwargs['Psat'] = Psat = Psat(T)
    if 'V' in kwargs:
        # Use molar volume to compute dZ if possible
        V = kwargs.pop('V')
        kwargs['dZ'] = fn.Z(T, Psat, V.g(T, Psat) - V.l(T, Psat))
    return self.function(T, **kwargs)
Clapeyron.functor.hook = Clapeyron_hook
@TDependentHandleBuilder('Hvap')
def heat_of_vaporization_handle(handle, CAS, Tb, Tc, Pc, omega,
                                similarity_variable, Psat, V):
    """Register every applicable heat-of-vaporization model for chemical *CAS*.

    Models range from tabulated/correlated data (Perry's, VDI) to
    corresponding-states estimates; the handle selects among them by
    temperature range.
    NOTE(review): Tb/Tc presumably in K and Pc in Pa — confirm against the
    data tables in .data.
    """
    # if has_CoolProp and self.CASRN in coolprop_dict:
    #     methods.append(COOLPROP)
    #     self.CP_f = coolprop_fluids[self.CASRN]
    #     Tmins.append(self.CP_f.Tt); Tmaxs.append(self.CP_f.Tc)
    add_model = handle.add_model
    # Perry's DIPPR EQ106 coefficients, tabulated per-CAS with a valid T range.
    if CAS in phase_change_data_Perrys2_150:
        Tc, C1, C2, C3, C4, Tmin, Tmax = phase_change_data_Perrys2_150[CAS]
        data = (Tc, C1, C2, C3, C4)
        add_model(EQ106.functor.from_args(data), Tmin, Tmax)
    # VDI PPDS-12 coefficients.
    if CAS in phase_change_data_VDI_PPDS_4:
        Tc, A, B, C, D, E = phase_change_data_VDI_PPDS_4[CAS]
        add_model(PPDS12.functor.from_args(data=(Tc, A, B, C, D, E)), 0, Tc)
    # Clapeyron estimate; attach the volume handle so Clapeyron_hook can
    # compute dZ from gas/liquid molar volumes at evaluation time.
    if all((Tc, Pc)):
        model = Clapeyron.functor.from_args(data=(Tc, Pc, None, Psat))
        model.V = V
        add_model(model, 0, Tc)
    # Corresponding-states correlations that only need Tc and omega.
    data = (Tc, omega)
    if all(data):
        for f in (MK, SMK, Velasco, Pitzer):
            add_model(f.functor.from_args(data), 0, Tc)
    # Tabulated VDI saturation data -> interpolation model over its T span.
    if CAS in VDI_saturation_dict:
        Ts, Hvaps = lookup_VDI_tabular_data(CAS, 'Hvap')
        add_model(InterpolatedTDependentModel(Ts, Hvaps, Ts[0], Ts[-1]))
    if Tc:
        if CAS in phase_change_data_Alibakhshi_Cs:
            C = float(phase_change_data_Alibakhshi_Cs.get(CAS, 'C'))
            add_model(Alibakhshi.functor.from_args(data=(Tc, C)), 0, Tc)
        # Watson extrapolations anchored at a tabulated (T_ref, Hvap_ref) point.
        if CAS in Hvap_data_CRC:
            Hvap = float(Hvap_data_CRC.get(CAS, 'HvapTb'))
            if not np.isnan(Hvap):
                Tb = float(Hvap_data_CRC.get(CAS, 'Tb'))
                data = dict(Hvap_ref=Hvap, T_ref=Tb, Tc=Tc, exponent=0.38)
                add_model(Watson.functor.from_kwargs(data), 0, Tc)
            Hvap = float(Hvap_data_CRC.get(CAS, 'Hvap298'))
            if not np.isnan(Hvap):
                data = dict(Hvap_ref=Hvap, T_ref=298., Tc=Tc, exponent=0.38)
                add_model(Watson.functor.from_kwargs(data), 0, Tc)
        if CAS in Hvap_data_Gharagheizi:
            Hvap = float(Hvap_data_Gharagheizi.get(CAS, 'Hvap298'))
            data = dict(Hvap_ref=Hvap, T_ref=298., Tc=Tc, exponent=0.38)
            add_model(Watson.functor.from_kwargs(data), 0, Tc)
    # Boiling-point based estimators.
    data = (Tb, Tc, Pc)
    if all(data):
        for f in (pc.Riedel, pc.Chen, pc.Vetere, pc.Liu):
            add_model(f(*data), 0, Tc)
# Replace the chemicals-library builder with this extended version.
pc.heat_of_vaporization_handle = heat_of_vaporization_handle
|
[
"yoelcortes@gmail.com"
] |
yoelcortes@gmail.com
|
6b5240c09546ae48c6e11c2e580c271be45aea67
|
b75ee1f07fcc50142da444e8ae9ba195bf49977a
|
/test/todo.py
|
fda75e5f8c9744005e5de098f819c7cadc1540c1
|
[
"Apache-2.0"
] |
permissive
|
FlorianLudwig/code-owl
|
369bdb57a66c0f06e07853326be685c177e2802a
|
be6518c89fb49ae600ee004504f9485f328e1090
|
refs/heads/master
| 2016-08-04T02:26:07.445016
| 2014-05-25T19:19:13
| 2014-05-25T19:19:13
| 18,918,361
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
# this file contains tests for missing features
# this means the tests here do FAIL.
import codeowl.search
def match(query, code):
query = codeowl.search.generate_query(query)
code = codeowl.code.parse(code)
return codeowl.search.tokens(query, code, '<test>')
def test_py_import():
    """Spec for import matching; documents a missing feature (currently FAILS,
    see file header)."""
    # A bare `import foo` query should match any form of importing foo.
    assert match(
        'import foo',
        'from foo import bar'
    )
    assert match(
        'import foo.bar',
        'from foo import bar'
    )
    # A reference to `foo` without an import must not count as importing it.
    assert not match(
        'import foo',
        'import bar; print foo'
    )
def test_py_block():
    """Tree-based matching:
    do semantic matching of code blocks (missing feature; currently FAILS)."""
    # `print i` inside the for-block should match only when it is really
    # nested inside the loop body.
    assert match(
        'for: print i',
        'for i in xrange(10):\n'
        '    pass\n'
        '    print i\n'
    )
    # same as above just a few spaces less
    # since there are less not-maching tokens
    # this actually scores better than the
    # example above. But it should not match
    # at all.
    assert not match(
        'for: print i',
        'for i in xrange(10):\n'
        '    pass\n'
        'print i\n'
    )
|
[
"f.ludwig@greyrook.com"
] |
f.ludwig@greyrook.com
|
2bf16d2a0b120a587301917cafe6e3763746f348
|
52a3beeb07ad326115084a47a9e698efbaec054b
|
/horizon/.venv/bin/heat
|
da3eeefdf4e4f11003d3c6597a0c69aea163d0fd
|
[
"Apache-2.0"
] |
permissive
|
bopopescu/sample_scripts
|
3dade0710ecdc8f9251dc60164747830f8de6877
|
f9edce63c0a4d636f672702153662bd77bfd400d
|
refs/heads/master
| 2022-11-17T19:19:34.210886
| 2018-06-11T04:14:27
| 2018-06-11T04:14:27
| 282,088,840
| 0
| 0
| null | 2020-07-24T00:57:31
| 2020-07-24T00:57:31
| null |
UTF-8
|
Python
| false
| false
| 240
|
#!/home/horizon/horizon/.venv/bin/python
# -*- coding: utf-8 -*-
# Setuptools-generated console-script shim for the OpenStack Heat CLI.
import re
import sys
from heatclient.shell import main
if __name__ == '__main__':
    # Strip a trailing '-script.pyw'/'.exe' so argv[0] shows the bare command name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"Suhaib.Chishti@exponential.com"
] |
Suhaib.Chishti@exponential.com
|
|
62de38f216a4bb285bd5c78c9ae8517e3d1c44dc
|
4ddf82eeb31d46fb67802a4375390eb42a8f23b8
|
/tests/pyb/adc.py
|
875d31d732cc3648fff5484fb1e95c617dda69ad
|
[
"MIT"
] |
permissive
|
pulkin/micropython
|
1437a507b9e90c8824e80c3553e6209d89e64565
|
c274c947c611f510fd2b1c4ef6cbd9f4283794fc
|
refs/heads/master
| 2023-03-08T02:35:28.208819
| 2022-04-19T12:38:47
| 2022-04-19T12:38:47
| 167,732,676
| 103
| 36
|
MIT
| 2023-02-25T03:02:36
| 2019-01-26T19:57:59
|
C
|
UTF-8
|
Python
| false
| false
| 1,546
|
py
|
from pyb import ADC, Timer
# Hardware self-test for pyboard ADC. Channel 16 is the internal temperature
# sensor, channel 17 the internal voltage reference; assertions below check
# raw ADC counts against wide plausibility ranges.
adct = ADC(16) # Temperature 930 -> 20C
print(str(adct)[:19])
adcv = ADC(17) # Voltage 1500 -> 3.3V
print(adcv)
# read single sample; 2.5V-5V is pass range
val = adcv.read()
assert val > 1000 and val < 2000
# timer for read_timed
tim = Timer(5, freq=500)
# read into bytearray (8-bit samples)
buf = bytearray(b"\xff" * 50)
adcv.read_timed(buf, tim)
print(len(buf))
for i in buf:
    assert i > 50 and i < 150
# read into arrays with different element sizes
import array
arv = array.array("h", 25 * [0x7FFF])
adcv.read_timed(arv, tim)
print(len(arv))
for i in arv:
    assert i > 1000 and i < 2000
arv = array.array("i", 30 * [-1])
adcv.read_timed(arv, tim)
print(len(arv))
for i in arv:
    assert i > 1000 and i < 2000
# Test read_timed_multi: sample both channels simultaneously into byte buffers.
arv = bytearray(b"\xff" * 50)
art = bytearray(b"\xff" * 50)
ADC.read_timed_multi((adcv, adct), (arv, art), tim)
for i in arv:
    assert i > 60 and i < 125
# Wide range: unsure of accuracy of temp sensor.
for i in art:
    assert i > 15 and i < 200
# Same, with 32-bit signed buffers.
arv = array.array("i", 25 * [-1])
art = array.array("i", 25 * [-1])
ADC.read_timed_multi((adcv, adct), (arv, art), tim)
for i in arv:
    assert i > 1000 and i < 2000
# Wide range: unsure of accuracy of temp sensor.
for i in art:
    assert i > 50 and i < 2000
# Same, with 16-bit signed buffers.
arv = array.array("h", 25 * [0x7FFF])
art = array.array("h", 25 * [0x7FFF])
ADC.read_timed_multi((adcv, adct), (arv, art), tim)
for i in arv:
    assert i > 1000 and i < 2000
# Wide range: unsure of accuracy of temp sensor.
for i in art:
    assert i > 50 and i < 2000
|
[
"damien.p.george@gmail.com"
] |
damien.p.george@gmail.com
|
ba2dba9f3f8b6287d25ffbb5992a661635b0b81c
|
f1b9dc71b2dafc2b331de495ef4ceab938734fbe
|
/test.py
|
b80d38b36f71633c8ecb41f54b9fdf2f08e362bc
|
[] |
no_license
|
philippjfr/FOSS4G-2017-Talk
|
7deb6e48755b71658f930aa55c06d3a903f1abc6
|
e3f035dc648cfc4642e774e536d6c07c847417b5
|
refs/heads/master
| 2022-11-07T07:19:37.053629
| 2017-08-23T17:16:38
| 2017-08-23T17:16:38
| 100,023,633
| 2
| 4
| null | 2022-11-01T10:51:44
| 2017-08-11T11:09:21
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,261
|
py
|
import holoviews as hv
import param
import parambokeh
import numpy as np
from bokeh.io import curdoc
# Bokeh renderer in server mode; this script is intended to run under
# `bokeh serve` (it attaches widgets to curdoc() below).
renderer = hv.renderer('bokeh').instance(mode='server')
class CurveExample(hv.streams.Stream):
    """Parameterized sine-wave demo whose widgets (via parambokeh) drive a
    HoloViews element live."""
    color = param.Color(default='#000000', precedence=0)
    element = param.ObjectSelector(default=hv.Curve,
                                   objects=[hv.Curve, hv.Scatter, hv.Area],
                                   precedence=0)
    amplitude = param.Number(default=2, bounds=(2, 5))
    frequency = param.Number(default=2, bounds=(1, 10))
    output = parambokeh.view.Plot()
    def view(self, *args, **kwargs):
        # Build the selected element from the current amplitude/frequency with
        # a fixed y-range so widget changes don't rescale the axis.
        return self.element(self.amplitude*np.sin(np.linspace(0, np.pi*self.frequency)),
                            vdims=[hv.Dimension('y', range=(-5, 5))])(style=dict(color=self.color))
    def event(self, **kwargs):
        # Color/element changes need the DynamicMap rebuilt; other parameter
        # changes just propagate through the stream.
        if not self.output or any(k in kwargs for k in ['color', 'element']):
            self.output = hv.DynamicMap(self.view, streams=[self])
        else:
            super(CurveExample, self).event(**kwargs)
example = CurveExample(name='HoloViews Example')
doc = parambokeh.Widgets(example, callback=example.event, on_init=True, mode='server',
                         view_position='right', doc=curdoc())
|
[
"P.Rudiger@ed.ac.uk"
] |
P.Rudiger@ed.ac.uk
|
a8b75d380cc2dabf3b993334ea90a79166b071f7
|
729ee5bcb31708a82b08509775786597dac02263
|
/coding-challenges/week06/AssignmentQ4.py
|
40922ab75fb78db62f5805f72f03ea8d78695d15
|
[] |
no_license
|
pandey-ankur-au17/Python
|
67c2478316df30c2ac8ceffa6704cf5701161c27
|
287007646a694a0dd6221d02b47923935a66fcf4
|
refs/heads/master
| 2023-08-30T05:29:24.440447
| 2021-09-25T16:07:23
| 2021-09-25T16:07:23
| 358,367,687
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 532
|
py
|
# Given an array of size n and a number k, find all elements that appear
# more than n/k times
# Input : k = 4 ,n=9 , A = [ 3 ,1, 2, 2, 2, 1, 4, 3, 3 ]
# # Output: - [ 3 , 2]
# Read the array, then report every element occurring more than n/k times.
list1 = list(map(int, input("Enter the list=").split()))
n = len(list1)
k = int(input("Enter the value of k="))
# Tally occurrences of each element (insertion order preserved).
frequency = {}
for value in list1:
    frequency[value] = frequency.get(value, 0) + 1
# Keep the elements whose count exceeds n/k, in first-seen order.
output = [value for value, count in frequency.items() if count > n // k]
print(output)
|
[
"ankurpandey131@gmail.com"
] |
ankurpandey131@gmail.com
|
98a3bb666aa53326b5eaed0135122f7aa1ea659d
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part002017.py
|
3e62b15d60a4b9a7663df6952ee01bd0980a8d61
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,304
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher85189(CommutativeMatcher):
    """Auto-generated many-to-one matcher for a commutative Mul pattern.

    Produced by matchpy's code generator for sympy's Rubi integration rules;
    not intended to be edited by hand.
    """
    _instance = None
    # Pattern 0: two sequence variables under Mul, the second defaulting to S(1).
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i2.2.1.2.2.1.0', 1, 1, None), Mul),
            (VariableWithCount('i2.2.1.2.2.1.0_1', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Lazily create and cache the shared singleton instance.
        if CommutativeMatcher85189._instance is None:
            CommutativeMatcher85189._instance = CommutativeMatcher85189()
        return CommutativeMatcher85189._instance
    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 85188
        # `return` before `yield` makes this an empty generator: it yields no
        # matches for any subject (generated automaton has no accepting state).
        return
        yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
d04aaef21caf30a7b3162da917e0162e5972d2ce
|
4e93e4275e82a08d3c114c9dd72deb0959d41a55
|
/src/ch10/binary/__init__.py
|
6f8ad9a2cdb18a4e50d6eeff4b4bd01581729622
|
[] |
no_license
|
wsjhk/wasm-python-book
|
fd38e5a278be32df49416616724f38a415614e8b
|
872bc8fe754a6a3573436f534a8da696c0486c24
|
refs/heads/master
| 2023-03-16T02:18:48.869794
| 2020-09-17T11:31:49
| 2020-09-17T11:31:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: HuRuiFeng
@file: __init__.py
@time: 2020/8/19 1:55
@project: wasm-python-book
@desc:
"""
from ch10.binary import reader
# Re-export the decoder at package level: ch10.binary.decode_file.
decode_file = reader.decode_file
|
[
"huruifeng1202@163.com"
] |
huruifeng1202@163.com
|
9d9b777e9db5f14839481e9edb2dc062d203210a
|
93b5da40708878016d953aeb4d9b908ff8af1e04
|
/function/practice2.py
|
3a807a52ef9fe652abe3614a37febbc4e2a91658
|
[] |
no_license
|
Isaccchoi/python-practice
|
e50e932d2a7bf13b54e5ca317a03a5d63b406c6b
|
70e3e1f8590667cfe5ba4c094873eb39d555c44a
|
refs/heads/master
| 2021-06-29T05:45:18.371743
| 2017-09-21T06:15:47
| 2017-09-21T06:15:47
| 103,229,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 559
|
py
|
def sequential_search(str, key):
    """Linearly scan *str* and return the index of the first occurrence of *key*.

    Returns -1 when *key* is not present. (Previously it returned 0 on a
    miss, which was indistinguishable from a match at index 0.)
    NOTE: the parameter name `str` shadows the builtin; kept unchanged for
    compatibility with any keyword callers.
    """
    count = 0
    while count < len(str):
        if str[count] == key:
            return count
        count += 1
    return -1
# Smoke checks: look up each character of the 6-character Korean string in
# order; the final query ("말") is absent from the string.
print(sequential_search("개구리고양이", "개"))
print(sequential_search("개구리고양이", "구"))
print(sequential_search("개구리고양이", "리"))
print(sequential_search("개구리고양이", "고"))
print(sequential_search("개구리고양이", "양"))
print(sequential_search("개구리고양이", "이"))
print(sequential_search("개구리고양이", "말"))
|
[
"isaccchoi@naver.com"
] |
isaccchoi@naver.com
|
d85ff8da66e28d47df80755796b5b30c21127fba
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/able_way/long_way/take_eye.py
|
cfcd983c2042f55d5d6e7c99ddba0ad24bf58cf2
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
#! /usr/bin/env python
def problem_and_work(str_arg):
    """Echo *str_arg* to stdout."""
    print(str_arg)


def good_time(str_arg):
    """Print *str_arg* (via problem_and_work), then the fixed marker line."""
    problem_and_work(str_arg)
    print('high_thing')


if __name__ == '__main__':
    good_time('able_fact_and_life')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
a4096b7f1c4116a6ffaf257384b64bd4bd388996
|
5d302c38acd02d5af4ad7c8cfe244200f8e8f877
|
/String/6. ZigZag Conversion(Med).py
|
1e80504fb03188e5ba70ed8751bed04c9c9c96c4
|
[] |
no_license
|
nerohuang/LeetCode
|
2d5214a2938dc06600eb1afd21686044fe5b6db0
|
f273c655f37da643a605cc5bebcda6660e702445
|
refs/heads/master
| 2023-06-05T00:08:41.312534
| 2021-06-21T01:03:40
| 2021-06-21T01:03:40
| 230,164,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,574
|
py
|
class Solution:
    def convert(self, s: str, numRows: int) -> str:
        """Arrange *s* in a zigzag over *numRows* rows and read it row by row.

        Replaces the original O(len(s) * numRows) character grid — whose
        inner check `store[i] != ""` compared a list to a string and was
        always true — with the standard O(len(s)) row-accumulation walk.
        The commented-out duplicate implementation was removed.
        """
        # One row (or not enough characters to zigzag) leaves s unchanged.
        if numRows == 1 or numRows >= len(s):
            return s
        rows = [''] * numRows
        row, step = 0, 1
        for ch in s:
            rows[row] += ch
            # Bounce direction at the top and bottom rows.
            if row == 0:
                step = 1
            elif row == numRows - 1:
                step = -1
            row += step
        return ''.join(rows)
|
[
"huangxingyu00@gmail.com"
] |
huangxingyu00@gmail.com
|
83216ab4814f0ddc0657688c7f97149e35a3bdbb
|
142362be3c4f8b19bd118126baccab06d0514c5b
|
/xapian64/site-packages/djapian/utils/decorators.py
|
1deb7553ccf6014639b53b8845f5a841e5fbcb2e
|
[] |
no_license
|
dkramorov/astwobytes
|
84afa4060ffed77d5fd1a6e8bf5c5c69b8115de6
|
55071537c5c84d0a27757f11ae42904745cc1c59
|
refs/heads/master
| 2023-08-27T07:10:51.883300
| 2023-08-02T16:52:17
| 2023-08-02T16:52:17
| 191,950,319
| 0
| 0
| null | 2022-11-22T09:15:42
| 2019-06-14T13:44:23
|
HTML
|
UTF-8
|
Python
| false
| false
| 898
|
py
|
import xapian
def retry_if_except(errors, num_retry=4, cleanup_callback=None):
    """Decorator factory: retry the wrapped call on *errors*.

    The call is attempted up to *num_retry* times; between attempts,
    *cleanup_callback* is invoked when callable. The final failure is
    re-raised to the caller.
    """
    def _wrap(func):
        def _inner(*args, **kwargs):
            remaining = num_retry
            while remaining > 0:
                remaining -= 1
                try:
                    return func(*args, **kwargs)
                except errors:
                    # Out of attempts: let the exception propagate.
                    if remaining == 0:
                        raise
                    # Otherwise clean up (if configured) and try again.
                    if callable(cleanup_callback):
                        cleanup_callback()
        return _inner
    return _wrap
def reopen_if_modified(database, num_retry=3,
                       errors=xapian.DatabaseModifiedError):
    # Decorator: retry a Xapian read that failed because the database was
    # modified concurrently, reopening *database* before each new attempt.
    return retry_if_except(errors,
                           num_retry=num_retry,
                           cleanup_callback=lambda: database.reopen())
|
[
"zergo01@yandex.ru"
] |
zergo01@yandex.ru
|
14f7ea5a0fd4e2ab4ffa08421ed6e486da33ccfc
|
4692f28f86ee84a76abfac8cc8a0dd41fcd402e4
|
/tasks/github_tasks.py
|
ddf0c9af7bae0fe3c10ef1e08285fae600084aa1
|
[
"CC0-1.0",
"BSD-3-Clause",
"Apache-2.0",
"GPL-1.0-or-later",
"MIT",
"0BSD",
"Unlicense",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"BSD-2-Clause-Views",
"MPL-2.0"
] |
permissive
|
DataDog/datadog-agent
|
cc4b89839d6031903bf23aa12eccc2a3f3c7f213
|
d960cdb7de8fa5d1c7138cfe58e754af80cb796a
|
refs/heads/main
| 2023-09-04T10:45:08.138748
| 2023-09-04T09:13:43
| 2023-09-04T09:13:43
| 49,970,739
| 2,388
| 1,288
|
Apache-2.0
| 2023-09-14T20:06:34
| 2016-01-19T17:40:41
|
Go
|
UTF-8
|
Python
| false
| false
| 3,694
|
py
|
import os
from invoke import Exit, task
from .libs.github_actions_tools import (
download_artifacts_with_retry,
follow_workflow_run,
print_workflow_conclusion,
trigger_macos_workflow,
)
from .utils import DEFAULT_BRANCH, load_release_versions
@task
def trigger_macos_build(
    ctx,
    datadog_agent_ref=DEFAULT_BRANCH,
    release_version="nightly-a7",
    major_version="7",
    python_runtimes="3",
    destination=".",
    version_cache=None,
    retry_download=3,
    retry_interval=10,
):
    """Trigger the macOS build workflow (macos.yaml), follow it to completion,
    download its artifacts into *destination*, and fail the task when the
    workflow did not conclude successfully."""
    env = load_release_versions(ctx, release_version)
    # The macos-build workflow ref is pinned per release version.
    github_action_ref = env["MACOS_BUILD_VERSION"]
    run = trigger_macos_workflow(
        workflow_name="macos.yaml",
        github_action_ref=github_action_ref,
        datadog_agent_ref=datadog_agent_ref,
        release_version=release_version,
        major_version=major_version,
        python_runtimes=python_runtimes,
        # Send pipeline id and bucket branch so that the package version
        # can be constructed properly for nightlies.
        gitlab_pipeline_id=os.environ.get("CI_PIPELINE_ID", None),
        bucket_branch=os.environ.get("BUCKET_BRANCH", None),
        version_cache_file_content=version_cache,
    )
    workflow_conclusion = follow_workflow_run(run)
    print_workflow_conclusion(workflow_conclusion)
    # Artifacts are fetched even for failed runs (useful for debugging) before
    # the task exits non-zero.
    download_artifacts_with_retry(run, destination, retry_download, retry_interval)
    if workflow_conclusion != "success":
        raise Exit(code=1)
@task
def trigger_macos_test(
    ctx,
    datadog_agent_ref=DEFAULT_BRANCH,
    release_version="nightly-a7",
    python_runtimes="3",
    destination=".",
    version_cache=None,
    retry_download=3,
    retry_interval=10,
):
    """Trigger the macOS test workflow (test.yaml), follow it to completion,
    download its artifacts into *destination*, and fail the task when the
    workflow did not conclude successfully."""
    env = load_release_versions(ctx, release_version)
    # Same pinned workflow ref as the build task.
    github_action_ref = env["MACOS_BUILD_VERSION"]
    run = trigger_macos_workflow(
        workflow_name="test.yaml",
        github_action_ref=github_action_ref,
        datadog_agent_ref=datadog_agent_ref,
        python_runtimes=python_runtimes,
        version_cache_file_content=version_cache,
    )
    workflow_conclusion = follow_workflow_run(run)
    print_workflow_conclusion(workflow_conclusion)
    download_artifacts_with_retry(run, destination, retry_download, retry_interval)
    if workflow_conclusion != "success":
        raise Exit(code=1)
@task
def lint_codeowner(_):
    """
    Check every package in `pkg` has an owner
    """
    # Work from the repo root (parent of this tasks/ directory), since
    # CODEOWNERS paths are repo-root-relative.
    base = os.path.dirname(os.path.abspath(__file__))
    root_folder = os.path.join(base, "..")
    os.chdir(root_folder)
    owners = _get_code_owners(root_folder)
    # make sure each root package has an owner
    pkgs_without_owner = _find_packages_without_owner(owners, "pkg")
    if len(pkgs_without_owner) > 0:
        raise Exit(
            f'The following packages in `pkg` directory don\'t have an owner in CODEOWNERS: {pkgs_without_owner}',
            code=1,
        )
def _find_packages_without_owner(owners, folder):
pkg_without_owners = []
for x in os.listdir(folder):
path = os.path.join("/" + folder, x)
if path not in owners:
pkg_without_owners.append(path)
return pkg_without_owners
def _get_code_owners(root_folder):
code_owner_path = os.path.join(root_folder, ".github", "CODEOWNERS")
owners = {}
with open(code_owner_path) as f:
for line in f:
line = line.strip()
line = line.split("#")[0] # remove comment
if len(line) > 0:
parts = line.split()
path = os.path.normpath(parts[0])
# example /tools/retry_file_dump ['@DataDog/agent-metrics-logs']
owners[path] = parts[1:]
return owners
|
[
"noreply@github.com"
] |
DataDog.noreply@github.com
|
019ab9a8348c516eab7132b6900f6f45b8172cdb
|
243ce25168eea65144713a1100ca997a2d29f280
|
/p68.py
|
aaea8a7b5cd711d48a2ecaed6cc2366716f5667f
|
[] |
no_license
|
acadien/projecteuler
|
6aa1efbb1141ecf36d6b23bb6b058070e5e881e0
|
2efb0b5577cee7f046ed4f67d0f01f438cbf3770
|
refs/heads/master
| 2020-04-28T21:33:49.631044
| 2013-12-06T19:25:20
| 2013-12-06T19:25:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
#!/usr/bin/python
from math import *
from random import *
from itertools import chain,permutations
# Magic 5-gon ring search (Python 2 syntax: print statement below).
# A is a 10-tuple: indices 0-4 are the outer nodes, 5-9 the inner ring.
o_ind=range(5)
# Each line of the ring pairs outer node i with two adjacent inner nodes.
i_ind=[[5,6],[6,7],[7,8],[8,9],[9,5]]
def trysum(A):
    # Force 10 onto the outer ring (keeps the concatenated string 16 digits),
    # then require all five outer+inner+inner line sums to be equal.
    if 10 in A[5:]:
        return False
    B=set([A[i]+A[i_ind[i][0]]+A[i_ind[i][1]] for i in range(5)])
    if len(B)==1:
        return True
    return False
def flatten(listOfLists):
    return chain.from_iterable(listOfLists)
def tochain(A):
    # Start from the line with the smallest outer node and read the ring
    # clockwise, concatenating each line's three values into one number.
    start=A.index(min(A[:5]))
    return int("".join(map(str,flatten([[A[o_ind[j]],A[i_ind[j][0]],A[i_ind[j][1]]] for j in map(lambda x:x%5,range(start,start+5))]))))
# Track and print successively larger valid ring strings; the last printed
# value is the maximum.
mx=0
for A in permutations(range(1,11)):
    if trysum(A):
        Aval=tochain(A)
        if Aval>mx:
            print Aval
            mx=Aval
|
[
"adamcadien@gmail.com"
] |
adamcadien@gmail.com
|
ae9e61d3ae9ee479adabb49c6e4d75d50cecfd7e
|
b1f7c8eecdfc1e54e868430d7b6192b162f5a530
|
/insta/signals.py
|
2177664422d6238343c5645260f46897273c08a5
|
[] |
no_license
|
Nyagah-Tech/instagramWebApp
|
490c9d8874c082132e9a0d78eb849e2b1136656b
|
abf3421a408ac1daf5f5bf20b76073ad73894eba
|
refs/heads/master
| 2022-12-13T04:33:26.104920
| 2020-01-06T21:18:17
| 2020-01-06T21:18:17
| 229,194,006
| 0
| 0
| null | 2022-11-22T05:13:48
| 2019-12-20T05:08:18
|
Python
|
UTF-8
|
Python
| false
| false
| 565
|
py
|
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
from .models import Profile
@receiver(post_save, sender=User)
def create_profile(sender,instance,created,**kwargs):
    '''
    Create a Profile for each newly registered User (runs on User post_save).
    '''
    if created:
        Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_profile(sender,instance, **kwargs):
    '''
    Persist the related Profile whenever its User is saved.
    '''
    instance.profile.save()
|
[
"dan@localhost.localdomain"
] |
dan@localhost.localdomain
|
62b7897f6f243bde43c73bd0addea96c61ff23d3
|
e2d23d749779ed79472a961d2ab529eeffa0b5b0
|
/gcloud/tests/core/models/test_user_default_project.py
|
a8944c2c0e485a689a536b1e3088ebda6139b172
|
[
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
manlucas/atom
|
9fa026b3f914e53cd2d34aecdae580bda09adda7
|
94963fc6fdfd0568473ee68e9d1631f421265359
|
refs/heads/master
| 2022-09-30T06:19:53.828308
| 2020-01-21T14:08:36
| 2020-01-21T14:08:36
| 235,356,376
| 0
| 0
|
NOASSERTION
| 2022-09-16T18:17:08
| 2020-01-21T14:04:51
|
Python
|
UTF-8
|
Python
| false
| false
| 2,282
|
py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import factory
from django.db.models import signals
from django.test import TestCase
from gcloud.core.models import Project, UserDefaultProject
class UserDefaultProjectTestCase(TestCase):
    """Tests for UserDefaultProject.objects.init_user_default_project: the
    first project set for a user sticks; later calls must not override it."""
    @factory.django.mute_signals(signals.post_save, signals.post_delete)
    def tearDown(self):
        # Wipe both tables so cases stay independent.
        Project.objects.all().delete()
        UserDefaultProject.objects.all().delete()
    @factory.django.mute_signals(signals.post_save, signals.post_delete)
    def test_init_user_default_project__first_set(self):
        # First call for a user records the given project as the default.
        project = Project.objects.create(name='name',
                                         creator='creator',
                                         desc='', )
        dp = UserDefaultProject.objects.init_user_default_project('username', project)
        self.assertEqual(dp.default_project.id, project.id)
    @factory.django.mute_signals(signals.post_save, signals.post_delete)
    def test_init_user_default_project__second_set(self):
        # A second call for the same user keeps the originally set project.
        project_1 = Project.objects.create(name='name',
                                           creator='creator',
                                           desc='', )
        project_2 = Project.objects.create(name='name',
                                           creator='creator',
                                           desc='', )
        UserDefaultProject.objects.init_user_default_project('username', project_1)
        dp = UserDefaultProject.objects.init_user_default_project('username', project_2)
        self.assertEqual(dp.default_project.id, project_1.id)
|
[
"lucaswang@canway.net"
] |
lucaswang@canway.net
|
02778531cc548dda2bfadf226376a93af1bcd11f
|
746bf62ae3599f0d2dcd620ae37cd11370733cc3
|
/leetcode/contains-duplicate.py
|
768cd64e1cb979d349fc2bf6872d9d0a27bb7e6b
|
[] |
no_license
|
wanglinjie/coding
|
ec0e614343b39dc02191455165eb1a5c9e6747ce
|
350f28cad5ec384df476f6403cb7a7db419de329
|
refs/heads/master
| 2021-04-22T14:00:48.825959
| 2017-05-02T12:49:05
| 2017-05-02T12:49:05
| 48,011,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Author: Wanglj
Create Time : 20151223
Last Modified:
判断列表中是否有重复的
'''
class Solution(object):
    def containsDuplicate(self, nums):
        """
        Return True when any value appears more than once in nums.

        :type nums: List[int]
        :rtype: bool
        """
        # A duplicate exists exactly when deduplicating shrinks the sequence.
        return len(set(nums)) < len(nums)
|
[
"hitwhwlj@163.com"
] |
hitwhwlj@163.com
|
f9d7f2202c7c7b8cfb47887171023887d23fb306
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_192/ch50_2020_03_31_18_26_15_429003.py
|
9cc22791965cdb59d8ef25da1a85490c844cd611
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
def junta_nome_sobrenome(n, s):
    """Join each first name in *n* with the surname at the same index in *s*,
    separated by a single space.

    Bug fixed: the original used an assignment inside ``append()``
    (``n_s.append(n[i] = espaco[i] + s[i])``), which is a SyntaxError.

    :param n: list of first names.
    :param s: list of surnames (same length as *n*).
    :return: list of "name surname" strings.
    """
    n_s = []
    for nome, sobrenome in zip(n, s):
        n_s.append(nome + ' ' + sobrenome)
    return n_s
|
[
"you@example.com"
] |
you@example.com
|
38723203b79a0913486767469b468bcf4790caac
|
795ba44e09add69a6c3859adf7e476908fcb234c
|
/backend/mod_training_1_27492/urls.py
|
58881d4c5e4d89926432059cb0802a8f7062e3db
|
[] |
no_license
|
crowdbotics-apps/mod-training-1-27492
|
0df7b863ef18e7ba4e3f1c34f1bac7554e184553
|
91815c10d4f8af1e18bb550db97373b4099a4ae9
|
refs/heads/master
| 2023-04-26T00:57:13.854464
| 2021-06-01T19:48:41
| 2021-06-01T19:48:41
| 371,498,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,247
|
py
|
"""mod_training_1_27492 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "mod-training-1"
admin.site.site_title = "mod-training-1 Admin Portal"
admin.site.index_title = "mod-training-1 Admin"
# swagger
api_info = openapi.Info(
title="mod-training-1 API",
default_version="v1",
description="API documentation for mod-training-1 App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
0e8eb7cfd16b0572c20b19d0ca5619c7eecbd659
|
7c9917d62959b8d69309d44362481356e632083e
|
/enzymatic_bias/profile_model_of_bias/kmer_init_tuned/dnase/24mer/model.py
|
8e07a5bc7b4e1400fd6be929ca7b482df4c88a82
|
[
"MIT"
] |
permissive
|
kundajelab/bias_correction
|
bee77bd2d36268aa6b7046b817e9e349c8cc8238
|
521678ea8739473f793b0ce85e22e622d13df6fe
|
refs/heads/master
| 2021-06-21T11:34:54.558788
| 2021-06-10T06:39:35
| 2021-06-10T06:39:35
| 218,137,376
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,974
|
py
|
import pickle
import pdb
import numpy as np ;
from keras.backend import int_shape
from sklearn.metrics import average_precision_score
from kerasAC.metrics import *
from kerasAC.custom_losses import *
import keras;
#import the various keras layers
from keras.layers import Dense,Activation,Dropout,Flatten,Reshape,Input, Concatenate, Cropping1D, Add
from keras.layers.core import Dropout, Reshape, Dense, Activation, Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.pooling import GlobalMaxPooling1D,MaxPooling1D,GlobalAveragePooling1D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from keras.constraints import maxnorm;
from keras.regularizers import l1, l2
from keras.models import Model
def get_model_param_dict(param_file):
    '''
    Parse a two-column TSV of model hyperparameters.

    param_file has 2 columns -- param name in column 1, and param value in
    column 2 (tab-separated, one parameter per line).

    :param param_file: path to the TSV file, or None.
    :return: dict mapping parameter name (str) to its raw string value;
             empty dict when param_file is None.
    '''
    params = {}
    if param_file is None:
        return params
    # Fix: use a context manager so the file handle is closed
    # (the original leaked the handle returned by open()).
    with open(param_file, 'r') as fh:
        for line in fh.read().strip().split('\n'):
            tokens = line.split('\t')
            params[tokens[0]] = tokens[1]
    return params
def getModelGivenModelOptionsAndWeightInits(args):
    """Build and compile a two-headed (profile + log-counts) Keras model whose
    first convolution is initialized from a TOBIAS DNase bias PSSM.

    Args:
        args: namespace with at least ``seed``, ``init_weights``,
            ``tdb_input_flank``, ``tdb_output_flank``, ``num_tasks`` and
            ``model_params`` (path to a TSV of overrides, or None).

    Returns:
        Compiled ``keras.models.Model`` with outputs
        [profile_predictions, logcount_predictions].
    """
    #default params (can be overwritten by providing model_params file as input to the training function)
    filters=1
    conv1_kernel_size=6
    control_smoothing=[1, 50]
    counts_loss_weight=1
    profile_loss_weight=1
    model_params=get_model_param_dict(args.model_params)
    if 'filters' in model_params:
        filters=int(model_params['filters'])
    if 'conv1_kernel_size' in model_params:
        conv1_kernel_size=int(model_params['conv1_kernel_size'])
    if 'counts_loss_weight' in model_params:
        counts_loss_weight=float(model_params['counts_loss_weight'])
    if 'profile_loss_weight' in model_params:
        profile_loss_weight=float(model_params['profile_loss_weight'])
    print("params:")
    print("filters:"+str(filters))
    print("conv1_kernel_size:"+str(conv1_kernel_size))
    print("counts_loss_weight:"+str(counts_loss_weight))
    print("profile_loss_weight:"+str(profile_loss_weight))
    #load the fixed weights
    # NOTE(review): hard-coded absolute path to the pickled TOBIAS bias object;
    # this only runs on the original cluster -- consider parameterizing.
    tobias_data_dnase_k562=pickle.load(open("/srv/scratch/annashch/bias_correction/enzymatic_bias/tobias/dnase/K562.filtered_AtacBias.pickle",'rb'))
    # Reorder PSSM columns; presumably maps the TOBIAS base order onto the
    # ACGT order this model expects -- TODO confirm.
    tobias_dnase_pssm_forward=np.transpose(tobias_data_dnase_k562.bias['forward'].pssm[0:4])[:,[0,2,3,1]]
    conv1_pwm=np.expand_dims(tobias_dnase_pssm_forward,axis=-1)
    conv1_bias=np.zeros((1,))
    conv1_frozen_weights=[conv1_pwm, conv1_bias]
    #read in arguments
    seed=args.seed
    init_weights=args.init_weights
    sequence_flank=args.tdb_input_flank[0]
    num_tasks=args.num_tasks
    seq_len=2*sequence_flank
    out_flank=args.tdb_output_flank[0]
    out_pred_len=2*out_flank
    print(seq_len)
    print(out_pred_len)
    #define inputs
    inp = Input(shape=(seq_len, 4),name='sequence')
    # first convolution without dilation
    first_conv = Conv1D(filters,
                        weights=conv1_frozen_weights,
                        kernel_size=conv1_kernel_size,
                        padding='valid',
                        activation='relu',
                        name='1st_conv')(inp)
    # Crop the conv output symmetrically so its length matches out_pred_len;
    # an odd-length difference puts the extra base on the right side.
    profile_out_prebias_shape =int_shape(first_conv)
    cropsize = int(profile_out_prebias_shape[1]/2)-int(out_pred_len/2)
    if profile_out_prebias_shape[1]%2==0:
        crop_left=cropsize
        crop_right=cropsize
    else:
        crop_left=cropsize
        crop_right=cropsize+1
    print(crop_left)
    print(crop_right)
    profile_out_prebias = Cropping1D((crop_left,crop_right),
                                     name='prof_out_crop2match_output')(first_conv)
    # Profile head: 1x1 conv producing one channel per task.
    profile_out = Conv1D(filters=num_tasks,
                         kernel_size=1,
                         name="profile_predictions")(profile_out_prebias)
    # Counts head: global average pooling followed by a dense layer.
    gap_combined_conv = GlobalAveragePooling1D(name='gap')(first_conv)
    count_out = Dense(num_tasks, name="logcount_predictions")(gap_combined_conv)
    model=Model(inputs=[inp],outputs=[profile_out,
                                      count_out])
    print("got model")
    model.compile(optimizer=Adam(),
                  loss=[MultichannelMultinomialNLL(1),'mse'],
                  loss_weights=[profile_loss_weight,counts_loss_weight])
    print("compiled model")
    return model
if __name__=="__main__":
import argparse
parser=argparse.ArgumentParser(description="view model arch")
parser.add_argument("--seed",type=int,default=1234)
parser.add_argument("--init_weights",default=None)
parser.add_argument("--tdb_input_flank",nargs="+",default=[673])
parser.add_argument("--tdb_output_flank",nargs="+",default=[500])
parser.add_argument("--num_tasks",type=int,default=1)
parser.add_argument("--model_params",default=None)
args=parser.parse_args()
model=getModelGivenModelOptionsAndWeightInits(args)
print(model.summary())
pdb.set_trace()
|
[
"annashcherbina@gmail.com"
] |
annashcherbina@gmail.com
|
c51867d2cd55c0e97a84af6332cfcfd529eeb1d2
|
40be7c7a50b839a922c22ea624123b11e5da25cb
|
/feria/migrations/0003_franquicia_imagen.py
|
c252e9fbb8cc5b7835b3b8c49f36ae453c9fd74f
|
[] |
no_license
|
LuberNavarrete/sistema
|
e70b15d0410402ceb6f3ba2a320886d5b225c65c
|
e4800f3aa2cdde69189a43dcf9543be85ed14693
|
refs/heads/master
| 2021-01-10T16:54:21.479466
| 2015-11-15T14:21:20
| 2015-11-15T14:21:20
| 44,011,243
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional ``imagen`` ImageField to the ``franquicia`` model."""

    dependencies = [
        ('feria', '0002_auto_20151008_1937'),
    ]
    operations = [
        migrations.AddField(
            model_name='franquicia',
            name='imagen',
            # null=True keeps existing rows valid; files land under 'imagenes'.
            field=models.ImageField(null=True, upload_to=b'imagenes'),
        ),
    ]
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
951e87820000a2ba516ce5733b3db4e751382387
|
be0eda70579e191d7dd1ace87ccbda8a3e85474e
|
/app01/urls.py
|
78bd401466e3cf6df768de0ca595ee3d970007da
|
[] |
no_license
|
zxycode-2020/django_tutrital2
|
084ebe4a83e7a9724163ae54b816239ff2b0cce6
|
969ce0b3caca92c045afee0e5eb628f9afb35b48
|
refs/heads/master
| 2022-05-03T23:22:17.075830
| 2020-02-05T03:11:09
| 2020-02-05T03:11:09
| 236,920,126
| 0
| 0
| null | 2022-04-22T23:00:29
| 2020-01-29T06:40:39
|
Python
|
UTF-8
|
Python
| false
| false
| 577
|
py
|
from django.urls import path, include
from app01.views import index, article, test_url, student, \
students, args, reg, xuanran, orm_test, post_cls, get_cls
# URL routes for app01; each path maps a URL prefix to a view callable.
urlpatterns = [
    path('index/', index),
    path('article/<str:aid>/', article),
    path('test_url/', test_url),
    path('students/', students), # student list
    path('student/<str:stu_id>/', student), # single student
    path('args/', args),
    path('reg/', reg),
    path('xuanran/', xuanran),
    path('orm_test/', orm_test),
    path('post_cls/', post_cls),
    path('get_cls/', get_cls),
]
|
[
"1049939190@qq.com"
] |
1049939190@qq.com
|
2b792a76a6d249e279abf8afff4ad007a551e9e7
|
d42a9128898d504a9831f1afee3198c4677236c9
|
/Level_3/가장먼노드.py
|
19833971ef9bcd673871a0c8a749d8662f7847c8
|
[] |
no_license
|
ketkat001/Programmers-coding
|
6848a9c8cffd97b792cfc8856ec135b72af5d688
|
799baba8d66a9971b43233d231cecbf262b4ea27
|
refs/heads/master
| 2023-09-02T23:07:25.614820
| 2021-10-17T18:12:02
| 2021-10-17T18:12:02
| 235,016,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
from collections import deque
def solution(n, edge):
    """Count the vertices farthest (in BFS hops) from vertex 1.

    :param n: number of vertices, labeled 1..n.
    :param edge: list of undirected [u, v] edges.
    :return: number of vertices at maximum distance from vertex 1.
    """
    adj = [[] for _ in range(n + 1)]
    for u, v in edge:
        adj[u].append(v)
        adj[v].append(u)
    seen = [False] * (n + 1)
    seen[1] = True
    frontier = [1]
    last_level_size = 0
    # Level-by-level BFS: when the frontier empties, last_level_size holds
    # the size of the final (farthest) level.
    while frontier:
        last_level_size = len(frontier)
        nxt = []
        for node in frontier:
            for neighbor in adj[node]:
                if not seen[neighbor]:
                    seen[neighbor] = True
                    nxt.append(neighbor)
        frontier = nxt
    return last_level_size
print(solution(6, [[3, 6], [4, 3], [3, 2], [1, 3], [1, 2], [2, 4], [5, 2]]))
|
[
"ketkat001@gmail.com"
] |
ketkat001@gmail.com
|
ecad99a07312379715f64a3e39b3ea5577d254ee
|
c5746efe18a5406764c041d149d89c0e0564c5a5
|
/1. Python语言核心编程/1. Python核心/Day07/exercise11.py
|
a656871fe2f7a823083003ada2bd5ae8242e8c9c
|
[] |
no_license
|
ShaoxiongYuan/PycharmProjects
|
fc7d9eeaf833d3711211cd2fafb81dd277d4e4a3
|
5111d4c0a7644c246f96e2d038c1a10b0648e4bf
|
refs/heads/master
| 2021-12-15T05:45:42.117000
| 2021-11-23T06:45:16
| 2021-11-23T06:45:16
| 241,294,858
| 3
| 1
| null | 2021-02-20T15:29:07
| 2020-02-18T07:06:08
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 175
|
py
|
def sum_digit(num):
    """
    Return the sum of the decimal digits of *num*.

    :param num: a non-negative integer.
    :return: int sum of its digits.
    """
    return sum(int(digit) for digit in str(num))
|
[
"ysxstevenpp123@gmail.com"
] |
ysxstevenpp123@gmail.com
|
4052c872dbac2fd274177618ea0b913cd7c86450
|
6a9f06b967d7641ddff7b56425651b29d3e577f4
|
/mindinsight/mindinsight/backend/datavisual/train_visual_api.py
|
a868a443c817c402a689b20195737d12c7706bd9
|
[
"Apache-2.0"
] |
permissive
|
ZeroWangZY/DL-VIS
|
b3117016547007b88dc66cfe7339ef02b0d84e9c
|
8be1c70c44913a6f67dd424aa0e0330f82e48b06
|
refs/heads/master
| 2023-08-18T00:22:30.906432
| 2020-12-04T03:35:50
| 2020-12-04T03:35:50
| 232,723,696
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,850
|
py
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Backend interface module.
This module provides the interfaces to train processors functions.
"""
from flask import Blueprint
from flask import request
from flask import jsonify
from mindinsight.conf import settings
from mindinsight.datavisual.utils.tools import get_train_id
from mindinsight.datavisual.utils.tools import if_nan_inf_to_none
from mindinsight.datavisual.processors.histogram_processor import HistogramProcessor
from mindinsight.datavisual.processors.images_processor import ImageProcessor
from mindinsight.datavisual.processors.scalars_processor import ScalarsProcessor
from mindinsight.datavisual.processors.graph_processor import GraphProcessor
from mindinsight.datavisual.data_transform.data_manager import DATA_MANAGER
BLUEPRINT = Blueprint("train_visual", __name__, url_prefix=settings.URL_PATH_PREFIX+settings.API_PREFIX)
@BLUEPRINT.route("/datavisual/image/metadata", methods=["GET"])
def image_metadata():
"""
Interface to fetch metadata about the images for the particular run,tag, and zero-indexed sample.
Returns:
Response, which contains a list in JSON containing image events, each
one of which is an object containing items wall_time, step, width,
height, and query.
"""
tag = request.args.get("tag")
train_id = get_train_id(request)
processor = ImageProcessor(DATA_MANAGER)
response = processor.get_metadata_list(train_id, tag)
return jsonify(response)
@BLUEPRINT.route("/datavisual/image/single-image", methods=["GET"])
def single_image():
"""
Interface to fetch raw image data for a particular image.
Returns:
Response, which contains a byte string of image.
"""
tag = request.args.get("tag")
step = request.args.get("step")
train_id = get_train_id(request)
processor = ImageProcessor(DATA_MANAGER)
img_data = processor.get_single_image(train_id, tag, step)
return img_data
@BLUEPRINT.route("/datavisual/scalar/metadata", methods=["GET"])
def scalar_metadata():
    """
    Interface to fetch metadata about the scalars for the particular run and tag.

    Query params: ``tag`` and a train identifier (resolved by get_train_id).

    Returns:
        Response, which contains a list in JSON containing scalar events, each
        one of which is an object containing items' wall_time, step and value.
    """
    tag = request.args.get("tag")
    train_id = get_train_id(request)
    processor = ScalarsProcessor(DATA_MANAGER)
    response = processor.get_metadata_list(train_id, tag)
    metadatas = response['metadatas']
    # NaN/Inf are not valid JSON scalars; map them to None so jsonify
    # emits null instead of producing invalid output.
    for metadata in metadatas:
        value = metadata.get("value")
        metadata["value"] = if_nan_inf_to_none('scalar_value', value)
    return jsonify(response)
@BLUEPRINT.route("/datavisual/graphs/nodes", methods=["GET"])
def graph_nodes():
"""
Interface to get graph nodes.
Returns:
Response, which contains a JSON object.
"""
name = request.args.get('name', default=None)
tag = request.args.get("tag", default=None)
train_id = get_train_id(request)
graph_process = GraphProcessor(train_id, DATA_MANAGER, tag)
response = graph_process.list_nodes(scope=name)
return jsonify(response)
@BLUEPRINT.route("/datavisual/graphs/nodes/names", methods=["GET"])
def graph_node_names():
"""
Interface to query node names.
Returns:
Response, which contains a JSON object.
"""
search_content = request.args.get("search")
offset = request.args.get("offset", default=0)
limit = request.args.get("limit", default=100)
tag = request.args.get("tag", default=None)
train_id = get_train_id(request)
graph_process = GraphProcessor(train_id, DATA_MANAGER, tag)
resp = graph_process.search_node_names(search_content, offset, limit)
return jsonify(resp)
@BLUEPRINT.route("/datavisual/graphs/single-node", methods=["GET"])
def graph_search_single_node():
"""
Interface to search single node.
Returns:
Response, which contains a JSON object.
"""
name = request.args.get("name")
tag = request.args.get("tag", default=None)
train_id = get_train_id(request)
graph_process = GraphProcessor(train_id, DATA_MANAGER, tag)
resp = graph_process.search_single_node(name)
return jsonify(resp)
@BLUEPRINT.route("/datavisual/histograms", methods=["GET"])
def histogram():
"""
Interface to obtain histogram data.
Returns:
Response, which contains a JSON object.
"""
tag = request.args.get("tag", default=None)
train_id = get_train_id(request)
processor = HistogramProcessor(DATA_MANAGER)
response = processor.get_histograms(train_id, tag)
return jsonify(response)
@BLUEPRINT.route("/datavisual/scalars", methods=["GET"])
def get_scalars():
"""Get scalar data for given train_ids and tags."""
train_ids = request.args.getlist('train_id')
tags = request.args.getlist('tag')
processor = ScalarsProcessor(DATA_MANAGER)
scalars = processor.get_scalars(train_ids, tags)
return jsonify({'scalars': scalars})
def init_module(app):
"""
Init module entry.
Args:
app (Flask): The application obj.
"""
app.register_blueprint(BLUEPRINT)
|
[
"756762961@qq.com"
] |
756762961@qq.com
|
904accd2539767b15763cd55082659294465b998
|
a2e11ec88ef3c83b9f07129e76a3681a676d164f
|
/demo8apr/testapp/urls.py
|
612a33b0ec158d2795dc24c6b407ab4fabc9dc74
|
[] |
no_license
|
qwertypool/lofo
|
dadd7cd5b149a3a200b7111d803b1d0195d76642
|
3bc7bd125e7ea5a67f51dd6dd654e38a5f218055
|
refs/heads/master
| 2022-05-18T09:31:11.456634
| 2020-04-18T14:47:44
| 2020-04-18T14:47:44
| 256,773,858
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
from django.urls import path
from testapp import views
urlpatterns = [
path('form/',views.form_view,name='forms'),
path('thankyou/',views.thankyou_view,name='thankyou'),
path('list/',views.list_view,name='list'),
path('elist/',views.elist_view,name='elist'),
path('eform/',views.eform_view,name='eform'),
path('demo/',views.demo_view,name='demo'),
]
|
[
"deepapandey364@gmail.com"
] |
deepapandey364@gmail.com
|
32275f30d3edfcdfabbae11c0e0d3061a353a050
|
33f9056de72ea429774cdf42d3f813a4cd33a255
|
/backend/takeout/admin/models/admin.py
|
803776d2c167a36a3dde181c74e8d18c7d90e965
|
[
"MIT"
] |
permissive
|
alex159s/Take-out
|
a566e35d5c05c6e8456beb449c08b6c6479f4e79
|
27c66dcc4f0e045ae060255679a2aa68c0f744d2
|
refs/heads/master
| 2020-04-06T06:36:51.806309
| 2016-07-15T14:27:06
| 2016-07-15T14:27:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
# coding: utf-8
from lib.models.userbase import UserBase
class Admin(UserBase):
    """Administrator account model; extends the shared UserBase."""

    def to_string(self):
        # Public summary dict used for serialized list responses.
        return {
            "id": self.id,
            "username": self.username,
            "nickname": self.nickname,
        }
    def to_detail_string(self):
        # Admins expose no extra detail fields beyond the summary.
        return self.to_string()
|
[
"billo@qq.com"
] |
billo@qq.com
|
25bec75d335fd19663fb549bac3f111228adcee2
|
f3e51466d00510f1dae58f1cb87dd53244ce4e70
|
/LeetCodes/facebook/ReverseLinkedList.py
|
9184e30348f7f6739d047928f0e48937973b5b12
|
[] |
no_license
|
chutianwen/LeetCodes
|
40d18e7aa270f8235342f0485bfda2bd1ed960e1
|
11d6bf2ba7b50c07e048df37c4e05c8f46b92241
|
refs/heads/master
| 2022-08-27T10:28:16.594258
| 2022-07-24T21:23:56
| 2022-07-24T21:23:56
| 96,836,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 959
|
py
|
'''
Reverse a singly linked list.
Example:
Input: 1->2->3->4->5->NULL
Output: 5->4->3->2->1->NULL
Follow up:
A linked list can be reversed either iteratively or recursively. Could you implement both?
'''
# Definition for singly-linked list.
# Definition for singly-linked list.
class ListNode(object):
    def __init__(self, x):
        self.val = x      # node payload
        self.next = None  # successor node; None at the tail
class Solution(object):
    """Reverse a singly linked list, iteratively or recursively."""

    def reverseListIterative(self, head):
        """Iteratively reverse the list and return the new head.

        :type head: ListNode
        :rtype: ListNode
        """
        new_head = None
        while head:
            # Tuple-assignment order matters: head.next must be rebound before
            # head advances; with head first, head.next could be read on None.
            head.next, new_head, head = new_head, head, head.next
        return new_head

    def reverseList(self, head):
        """Recursively reverse the list and return the new head.

        Bug fixed: the original contained ``head.next.next= = head``,
        a SyntaxError (stray second '=').

        :type head: ListNode
        :rtype: ListNode
        """
        if not head or not head.next:
            return head
        node = self.reverseList(head.next)
        head.next.next = head
        head.next = None
        return node
|
[
"tianwen.chu@fedcentric.com"
] |
tianwen.chu@fedcentric.com
|
01dca87891811dc9c80df4ec667f35a0d253a385
|
2d997384a86f0d9c0cdb80a1141908abfdf674cc
|
/ML_homework8/task.py
|
3a7d666193d64500051f875d33be8500c66db5a8
|
[] |
no_license
|
Alice-Avetisyan/Machine_Learning
|
1ddc91fad066f3abf0457d036aa783f0fc40a46f
|
9a0cc83c6d90ef58703a383f066ef857bb124334
|
refs/heads/master
| 2021-01-16T09:07:20.435898
| 2020-06-18T08:44:17
| 2020-06-18T08:44:17
| 243,054,222
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,678
|
py
|
from sklearn.datasets import make_classification
# Generate a random n-class classification problem
X, y = make_classification(100, 3, 2, 1, class_sep=0.5) # 2 of 3 features are informative and 1 is redundant
# 100 -> number of samples/rows,
# 3 -> number of features/columns,
# 2 -> number of informative features,
# 1 -> number of redundant features (useless data)
# class_sep -> the complexity if the model
import matplotlib.pyplot as plt
# plt.hist(X[:, 1]) # all rows of the second column
# plt.show()
# plt.scatter(X[:, 0], X[:, 1])
# plt.show()
fig = plt.figure()
axis1 = fig.add_subplot(1, 2, 1)
axis1.hist(X[:, 1])
axis2 = fig.add_subplot(1, 2, 2)
axis2.scatter(X[:, 0], X[:, 1])
plt.show()
# plots the class distribution
for i in range(len(X)):
if y[i] == 0:
plt.scatter(X[i, 0], X[i, 1], marker='*', color='b')
else:
plt.scatter(X[i, 0], X[i, 1], marker='D', color='r')
plt.show()
from sklearn.svm import SVC
svc_model = SVC(kernel='rbf')
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=101)
svc_model.fit(X_train, y_train)
from sklearn.metrics import accuracy_score
y_pred = svc_model.predict(X_test)
print("Model Accuracy: ", accuracy_score(y_test, y_pred))
# converting the data into DataFrame
import pandas as pd
custom_df = pd.DataFrame(X, columns=['X1', 'X2', 'X3'])
custom_df.insert(len(custom_df.columns), 'y', pd.DataFrame(y))
print(custom_df)
# turning the data into a csv file
custom_df.to_csv('custom_data.csv', index=False)
csv = pd.read_csv('custom_data.csv')
print(csv)
|
[
"noreply@github.com"
] |
Alice-Avetisyan.noreply@github.com
|
9e30f6dd5e43574bbc8c96b3976c5ed164f00864
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/vehicle_systems/components/__init__.py
|
057145a5b192b33ecbef57d0bc595f52f45d7640
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 132
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/vehicle_systems/components/__init__.py
pass
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
10ebb5a33de3a78479eeeeab0075a4ed9b9b5b16
|
bcb56cc126ea1885eb5ecc920884e2e331def045
|
/Part B/Letter.py
|
5536de9080655ae9b4a8c2bdfc7f925462a11551
|
[] |
no_license
|
priyanshkedia04/Codeforces-Solutions
|
2d11cb7b8329fe658f983b7212c17fc89fd784f0
|
a5197c633bf4c3238f48bfb5b308144c2ffba473
|
refs/heads/main
| 2023-06-06T13:10:13.787843
| 2021-07-01T14:06:52
| 2021-07-01T14:06:52
| 382,000,707
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
from collections import Counter
# Read the available letters (line 1) and the target text (line 2);
# count occurrences of each character.
s1 = dict(Counter(list(input())))
s2 = dict(Counter(list(input())))
count = 0
# Spaces in the target do not need to be covered by the letter pool.
if ' ' in s2:
    del s2[' ']
# For each distinct required character, check the pool supplies enough copies.
for i in s2:
    if i in s1 and i:
        if s1[i] >= s2[i]:
            count += 1
# YES only when every distinct required character was satisfied.
if count == len(s2):
    print("YES")
else:
    print("NO")
|
[
"noreply@github.com"
] |
priyanshkedia04.noreply@github.com
|
dab8b347c67f8225bb55fa6570fe28846ab87f79
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/101/usersdata/227/49714/submittedfiles/av1_m3.py
|
24617831cbcfc8ae60100af176c4d6c910776e9e
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
# -*- coding: utf-8 -*-
# Approximate pi with the Nilakantha series:
#   pi = 3 + 4/(2*3*4) - 4/(4*5*6) + 4/(6*7*8) - ...
# Fixes vs. the original submission:
#   * missing closing parenthesis on the pi expression (SyntaxError)
#   * each term overwrote `pi` instead of accumulating with alternating sign
#   * '%.6d' truncated the float; '%.6f' prints 6 decimal places
import math
m = int(input('digite o número de termos:'))  # upper bound of series terms
a = 4
pi = 3.0
sinal = 1  # alternating sign of successive terms
for i in range(2, m + 1, 2):
    b = i + 1
    c = b + 1
    pi = pi + sinal * (a / (i * b * c))
    sinal = -sinal
print('%.6f' % pi)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
464aca6bdb71bea101e69f9172bcaeea0fdf5dee
|
a485f01fd697721356d4405dfef569c50499d652
|
/SipMask-mmdetection/configs/sipmask/sipmask++_r101_caffe_fpn_ssd_6x.py
|
dabccfd91bdc17e15ea79c812b84436a3eeb3192
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
Borda/SipMask
|
653333c1c7a7b5e9d0779c28f1b86d17b623aa5f
|
bc63fa93f9291d7b664c065f41d937a65d3c72fd
|
refs/heads/master
| 2023-05-25T11:11:44.452534
| 2021-03-26T02:47:49
| 2021-03-26T02:47:49
| 299,910,001
| 1
| 0
|
MIT
| 2020-09-30T12:22:24
| 2020-09-30T12:22:23
| null |
UTF-8
|
Python
| false
| false
| 4,553
|
py
|
# model settings
model = dict(
type='SipMask',
pretrained='open-mmlab://resnet101_caffe',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='caffe',
dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
extra_convs_on_inputs=False, # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='SipMaskHead',
num_classes=81,
in_channels=256,
stacked_convs=2,
ssd_flag=True,
norm_cfg=None,
rescoring_flag = True,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
center_sampling=True,
center_sample_radius=1.5))
# training and testing settings
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.1,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(576, 576), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(544, 544),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=8,
workers_per_gpu=3,
train=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict()
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[20, 23])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/sipmask++_r101_caffe_fpn_6x'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
[
"connor@tju.edu.cn"
] |
connor@tju.edu.cn
|
8d09c7578019ad5f22cf3b2ab7e4e74eaa4c0bbe
|
f3ccd2cf5c1819cf6b2b296a134a59a58deb87a6
|
/03img_classify/classify.py
|
a1bbe880c582e1d737c4fb08b515be56ded6347b
|
[] |
no_license
|
leebinjun/gaze_tracking_ARglasses
|
195120a17a0e4858f4cdfc9516e781567f091fb0
|
63841b565f6fbb16f788268fb1ef991df0142b6b
|
refs/heads/master
| 2020-07-27T01:50:35.800129
| 2019-12-18T03:05:15
| 2019-12-18T03:05:15
| 208,825,449
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,796
|
py
|
import tensorflow as tf
import numpy as np
import cv2
uid_to_human = {}
for line in tf.gfile.GFile('imagenet_synset_to_human_label_map.txt').readlines():
items = line.strip().split('\t')
uid_to_human[items[0]] = items[1]
node_id_to_uid = {}
for line in tf.gfile.GFile('imagenet_2012_challenge_label_map_proto.pbtxt').readlines():
if line.startswith(' target_class:'):
target_class = int(line.split(': ')[1])
if line.startswith(' target_class_string:'):
target_class_string = line.split(': ')[1].strip('\n').strip('\"')
node_id_to_uid[target_class] = target_class_string
node_id_to_name = {}
for key, value in node_id_to_uid.items():
node_id_to_name[key] = uid_to_human[value]
def create_graph():
with tf.gfile.FastGFile('classify_image_graph_def.pb', 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
def classify_image(image, top_k=2):
image_data = tf.gfile.FastGFile(image, 'rb').read()
# print(image_data)
# image_data = cv2.imread(image)
create_graph()
with tf.Session() as sess:
# 'softmax:0': A tensor containing the normalized prediction across 1000 labels
# 'pool_3:0': A tensor containing the next-to-last layer containing 2048 float description of the image
# 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG encoding of the image
softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
predictions = sess.run(softmax_tensor, feed_dict={'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
top_k = predictions.argsort()[-top_k:]
for node_id in top_k:
human_string = node_id_to_name[node_id]
score = predictions[node_id]
print('%s (score = %.5f)' % (human_string, score))
classify_image('IMG_20190917_120404.jpg')
|
[
"296735774@qq.com"
] |
296735774@qq.com
|
4cbbc8616a600058c184f711b2e71766118ee132
|
2c8c7617d98f0349e560291960ecc5fb831bc0af
|
/programmers/level1/min/최대공약수와최소공배수.py
|
67ebc70fa0b925142de49537b9ce963b435f15d2
|
[] |
no_license
|
BU-PS/coding_test
|
e53d9680ae80f32bfb5238795e868d3b37e5dd71
|
c4fbd5034c8f507a858ca021cc7f6cfcf43f402a
|
refs/heads/master
| 2023-03-15T05:26:36.566092
| 2021-03-25T12:36:40
| 2021-03-25T12:36:40
| 316,865,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 937
|
py
|
# 최대 공약수 (GCD) : 두 개 이상의 자연수의 공통인 약수 중 가장 큰 수
# 1. 최대 공약수를 구하는 법
# - 두수의 약수들을 구한다
# - 두수의 약수들을 집합(set)에 넣는다
# - 교집합을 통해 공약수를 찾는다
# - 교집합을 중 가장 큰 수를 찾는다
# 최소 공배수 (LCM) : 두 수의 공배수가 최소인
# 1. 최소 공배수를 구하는 법
# - N * M = L * C 의 식을 통해 값을 구한
def solution(n: int, m: int):
gcd_value = gcd(n=n, m=m)
lcm_value = lcm(n=n, m=m, g=gcd_value)
return [gcd_value, lcm_value]
def gcd(n: int, m: int):
max_value = max([n, m])
n_cm = set()
m_cm = set()
for i in range(1, max_value + 1):
if n % i == 0:
n_cm.add(i)
if m % i == 0:
m_cm.add(i)
return max(n_cm & m_cm)
def lcm(n: int, m: int, g: int):
return n * m // g
solution(4512, 18)
|
[
"kjhm0607@gmail.com"
] |
kjhm0607@gmail.com
|
27e83ea7ad44899703c2d61c2941e9dcef77cdd2
|
8e583ac7e8a2047f01fa6e9829f9de36022c3265
|
/lib/python/gooey/python_bindings/gooey_parser.py
|
95e2b888765693f41f424ea3c8819bc2d20689f2
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jonathanlurie/timelapseComposer
|
ef25c5623d19024e5f83ad6c236497fdcffca10d
|
8de9f1ca626419bacb11bf6c563e79d52fb16a8d
|
refs/heads/master
| 2021-01-10T09:18:58.517707
| 2015-05-25T18:58:40
| 2015-05-25T18:58:40
| 36,248,988
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
from argparse import ArgumentParser
class GooeyParser(object):
def __init__(self, **kwargs):
self.__dict__['parser'] = ArgumentParser(**kwargs)
self.widgets = {}
@property
def _mutually_exclusive_groups(self):
return self.parser._mutually_exclusive_groups
@property
def _actions(self):
return self.parser._actions
@property
def description(self):
return self.parser.description
def add_argument(self, *args, **kwargs):
widget = kwargs.pop('widget', None)
self.parser.add_argument(*args, **kwargs)
self.widgets[self.parser._actions[-1].dest] = widget
def add_mutually_exclusive_group(self, **kwargs):
return self.parser.add_mutually_exclusive_group(**kwargs)
def add_argument_group(self, *args, **kwargs):
return self.parser.add_argument_group(*args, **kwargs)
def parse_args(self, args=None, namespace=None):
return self.parser.parse_args(args, namespace)
def __getattr__(self, item):
return getattr(self.parser, item)
def __setattr__(self, key, value):
return setattr(self.parser, key, value)
|
[
"lurie.jo@gmail.com"
] |
lurie.jo@gmail.com
|
3bec992595116b04adc9b11ce51ab2e1693e2a4b
|
38c8cca903432a88a6141dab4b9ac24740ae9e39
|
/src/crike_django/manage.py
|
8937161661f10d2505a8b82f54ecd20f227091b2
|
[
"Apache-2.0"
] |
permissive
|
mxwww/crike
|
3d37882e75a4f7170d183d2050d6a643a72f381b
|
141bd1c9b37882f0369dd8231cdf3576eeb7a5e1
|
refs/heads/master
| 2023-07-19T08:09:25.806263
| 2016-06-11T05:30:46
| 2016-06-11T05:30:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "crike_django.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"geekan@foxmail.com"
] |
geekan@foxmail.com
|
a2190e26fa997ddb6b13f10b274f5d200b6e3918
|
de6fb3a55196b6bd36a4fda0e08ad658679fb7a1
|
/vt_manager/src/python/vt_manager/models/utils/Choices.py
|
8b045af44501156cda604d3c0cf5c53e35f5078b
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
dana-i2cat/felix
|
4a87af639e4c7db686bfa03f1ae4ce62711615e3
|
059ed2b3308bda2af5e1942dc9967e6573dd6a53
|
refs/heads/master
| 2021-01-02T23:12:43.840754
| 2016-02-04T10:04:24
| 2016-02-04T10:04:24
| 17,132,912
| 4
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,606
|
py
|
class VirtTechClass:
VIRT_TECH_TYPE_XEN = "xen"
VIRT_TECH_CHOICES = (
(VIRT_TECH_TYPE_XEN, 'XEN'),
)
@staticmethod
def validateVirtTech(value):
for tuple in VirtTechClass.VIRT_TECH_CHOICES:
if value in tuple:
return
raise Exception("Virtualization Type not valid")
class OSDistClass():
OS_DIST_TYPE_DEBIAN = "Debian"
OS_DIST_TYPE_UBUNTU = "Ubuntu"
OS_DIST_TYPE_REDHAT = "RedHat"
OS_DIST_TYPE_CENTOS = "CentOS"
OS_DIST_CHOICES = (
(OS_DIST_TYPE_DEBIAN, 'Debian'),
(OS_DIST_TYPE_UBUNTU, 'Ubuntu'),
(OS_DIST_TYPE_REDHAT, 'RedHat'),
(OS_DIST_TYPE_CENTOS, 'CentOS'),
)
@staticmethod
def validateOSDist(value):
for tuple in OSDistClass.OS_DIST_CHOICES:
if value in tuple:
return
raise Exception("OS Distribution not valid")
class OSVersionClass():
OS_VERSION_TYPE_50 = "5.0"
OS_VERSION_TYPE_60 = "6.0"
OS_VERSION_TYPE_62 = "6.2"
OS_VERSION_TYPE_70 = "7.0"
OS_VERSION_CHOICES = (
(OS_VERSION_TYPE_50, '5.0'),
(OS_VERSION_TYPE_60, '6.0'),
(OS_VERSION_TYPE_62, '6.2'),
(OS_VERSION_TYPE_70, '7.0'),
)
@staticmethod
def validateOSVersion(value):
for tuple in OSVersionClass.OS_VERSION_CHOICES:
if value in tuple:
return
raise Exception("OS Version not valid")
class OSTypeClass():
OS_TYPE_TYPE_GNULINUX = "GNU/Linux"
OS_TYPE_TYPE_WINDOWS = "Windows"
OS_TYPE_CHOICES = (
(OS_TYPE_TYPE_GNULINUX, 'GNU/Linux'),
(OS_TYPE_TYPE_WINDOWS, 'Windows'),
)
@staticmethod
def validateOSType(value):
for tuple in OSTypeClass.OS_TYPE_CHOICES:
if value in tuple:
return
raise Exception("OS Type not valid")
|
[
"jenkins@integration.localhost"
] |
jenkins@integration.localhost
|
06e6ec8eeae855acf71b78ba670c6f33f3e7d563
|
b144c5142226de4e6254e0044a1ca0fcd4c8bbc6
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/cfm_c1742b75736db9d1da0fb731317ab337.py
|
2553211aa330936731e6ce2d5a16da9385c4e481
|
[
"MIT"
] |
permissive
|
iwanb/ixnetwork_restpy
|
fa8b885ea7a4179048ef2636c37ef7d3f6692e31
|
c2cb68fee9f2cc2f86660760e9e07bd06c0013c2
|
refs/heads/master
| 2021-01-02T17:27:37.096268
| 2020-02-11T09:28:15
| 2020-02-11T09:28:15
| 239,721,780
| 0
| 0
|
NOASSERTION
| 2020-02-11T09:20:22
| 2020-02-11T09:20:21
| null |
UTF-8
|
Python
| false
| false
| 6,156
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Cfm(Base):
"""This object contains the configuration of the CFM protocol.
The Cfm class encapsulates a required cfm resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'cfm'
def __init__(self, parent):
super(Cfm, self).__init__(parent)
@property
def Bridge(self):
"""An instance of the Bridge class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.bridge_d8b0c3589e6175e046e1a83cbe6f36b6.Bridge)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.bridge_d8b0c3589e6175e046e1a83cbe6f36b6 import Bridge
return Bridge(self)
@property
def EnableOptionalLmFunctionality(self):
"""NOT DEFINED
Returns:
bool
"""
return self._get_attribute('enableOptionalLmFunctionality')
@EnableOptionalLmFunctionality.setter
def EnableOptionalLmFunctionality(self, value):
self._set_attribute('enableOptionalLmFunctionality', value)
@property
def EnableOptionalTlvValidation(self):
"""If true, the CFM protocol will validate optional TLVs present in CFM packets.
Returns:
bool
"""
return self._get_attribute('enableOptionalTlvValidation')
@EnableOptionalTlvValidation.setter
def EnableOptionalTlvValidation(self, value):
self._set_attribute('enableOptionalTlvValidation', value)
@property
def Enabled(self):
"""If true, the CFM protcol is enabled.
Returns:
bool
"""
return self._get_attribute('enabled')
@Enabled.setter
def Enabled(self, value):
self._set_attribute('enabled', value)
@property
def ReceiveCcm(self):
"""If true, the CFM protocol can receive CFM CCMs on this port.
Returns:
bool
"""
return self._get_attribute('receiveCcm')
@ReceiveCcm.setter
def ReceiveCcm(self, value):
self._set_attribute('receiveCcm', value)
@property
def RunningState(self):
"""The current running state of the CFM protocol.
Returns:
str(unknown|stopped|stopping|starting|started)
"""
return self._get_attribute('runningState')
@property
def SendCcm(self):
"""If true, the CFM protocol can send CFM CCMs from this port.
Returns:
bool
"""
return self._get_attribute('sendCcm')
@SendCcm.setter
def SendCcm(self, value):
self._set_attribute('sendCcm', value)
@property
def SuppressErrorsOnAis(self):
"""If true, the errors on AIS are suopressed.
Returns:
bool
"""
return self._get_attribute('suppressErrorsOnAis')
@SuppressErrorsOnAis.setter
def SuppressErrorsOnAis(self, value):
self._set_attribute('suppressErrorsOnAis', value)
def update(self, EnableOptionalLmFunctionality=None, EnableOptionalTlvValidation=None, Enabled=None, ReceiveCcm=None, SendCcm=None, SuppressErrorsOnAis=None):
"""Updates a child instance of cfm on the server.
Args:
EnableOptionalLmFunctionality (bool): NOT DEFINED
EnableOptionalTlvValidation (bool): If true, the CFM protocol will validate optional TLVs present in CFM packets.
Enabled (bool): If true, the CFM protcol is enabled.
ReceiveCcm (bool): If true, the CFM protocol can receive CFM CCMs on this port.
SendCcm (bool): If true, the CFM protocol can send CFM CCMs from this port.
SuppressErrorsOnAis (bool): If true, the errors on AIS are suopressed.
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
def Start(self):
"""Executes the start operation on the server.
Starts the CFM protocol on a port or group of ports.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('start', payload=payload, response_object=None)
def Stop(self):
"""Executes the stop operation on the server.
Stops the CFM protocol on a port or group of ports.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('stop', payload=payload, response_object=None)
|
[
"srvc_cm_packages@keysight.com"
] |
srvc_cm_packages@keysight.com
|
3093186e46c96765d0b51554468a7761c5484e8e
|
a72e79b8caa43e973e7d7ecb7ffdaba15314bb9f
|
/server/wtpodcast2/feeds/whatsnew/urls.py
|
d16a486de32ebec8b741e4bcd3163578dee8596f
|
[] |
no_license
|
crgwbr/wt-podcast2
|
2e4be9a0ffa8675d8283f3d0cc16adc799acac68
|
a2dfb178b5e4c3e9ac5ab9ef7c13669caf50129c
|
refs/heads/master
| 2022-12-24T07:42:47.599582
| 2020-10-08T15:35:52
| 2020-10-08T15:35:52
| 266,866,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
from django.urls import path
from . import views
app_name = 'whatsnew'
urlpatterns = [
path('feed.rss', views.feed_rss, name='feed_rss'),
]
|
[
"crgwbr@gmail.com"
] |
crgwbr@gmail.com
|
f7a4a5e6467ee184d150cffb9fae09e625703666
|
1af44bdcbc3c15d3f6e436a7924dfd45f504ab3a
|
/01.jump to python/02.Data Science/1. collection/6. App/Scheduler_example.py
|
25345e1d5cf5e270c0a09dd38d9fcbcdd11abc17
|
[] |
no_license
|
wql7654/bigdata_exam
|
f57c8b475690cbc5978009dbf8008bedff602e2a
|
c07ee711bb84407428ba31165185b9607b6825e8
|
refs/heads/master
| 2023-04-07T00:50:59.563714
| 2021-05-25T02:46:43
| 2021-05-25T02:46:43
| 180,915,985
| 0
| 0
| null | 2023-03-25T01:08:09
| 2019-04-12T02:36:08
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 998
|
py
|
import threading
import time
g_Balcony_windows=False
g_AI_Mode=False
def updata_scheduler():
global g_Balcony_windows
while True:
if g_AI_Mode == False:
continue
else:
time.sleep(5)
g_Balcony_windows=not g_Balcony_windows
t= threading.Thread(target=updata_scheduler)
t.daemon=True
t.start()
while True:
print("메뉴를 선택하세요")
print("1. 장비 상태 조회")
print("2. 인공지능 모드 변경")
print("3. 종료")
menu_num= int(input("메뉴 입력: "))
if(menu_num==1):
print("발코니(베란다) 창문: ",end='')
if g_Balcony_windows==True:
print("열림")
else:
print("닫힘")
elif(menu_num==2):
print("인공지능 모드: ", end='')
g_AI_Mode=not g_AI_Mode
if g_AI_Mode==True:
print("작동")
else:
print("정지")
else:
break
|
[
"studerande5@gmail.com"
] |
studerande5@gmail.com
|
0864304fb6f9996499fcb687bf16c415b3d12c7e
|
938d5d26c0346316a10a74520b7e30b1bb1f6893
|
/oncopolicy/utils/generic.py
|
4a1dcb70ea43341de423c68976e0cc57c3119a36
|
[
"MIT"
] |
permissive
|
yala/Tempo
|
46fe0da5a6e2e1a8b9bc855851e7ff9a3ab63bd6
|
bf3e0e78d64869bb2079c582a4a35982f78386ad
|
refs/heads/main
| 2023-04-17T07:04:34.697607
| 2022-01-13T21:03:04
| 2022-01-13T21:03:04
| 419,388,269
| 13
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,560
|
py
|
import datetime
import hashlib
import numpy as np
from copy import deepcopy
import torch
import pdb
INVALID_DATE_STR = "Date string not valid! Received {}, and got exception {}"
ISO_FORMAT = '%Y-%m-%d %H:%M:%S'
CGMH_ISO_FORMAT ='%Y%m%d'
DAYS_IN_YEAR = 365
DAYS_IN_MO = 30
MAX_MO_TO_CANCER = 1200
MIN_MO_TO_CANCER = 3
MAX_PREFERNCES = 10.0
MIN_PREFERNCES = 0
EPSILON = 1e-3
AVG_MOMENTUM = 0.95
NUM_DIM_AUX_FEATURES = 7 ## Deprecated
class AverageMeter():
def __init__(self):
self.avg = 0
self.first_update = True
def reset(self):
self.avg = 0
self.first_update = True
def update(self, val_tensor):
val = val_tensor.item()
if self.first_update:
self.avg = val
self.first_update = False
else:
self.avg = (AVG_MOMENTUM * self.avg) + (1-AVG_MOMENTUM) * val
assert self.avg >= 0 and val >= 0
def get_aux_tensor(tensor, args):
## use of auxillary features for screen is deprecated
return torch.zeros([tensor.size()[0], NUM_DIM_AUX_FEATURES]).to(tensor.device)
def to_numpy(tensor):
return tensor.cpu().numpy()
def to_tensor(arr, device):
return torch.Tensor(arr).to(device)
def sample_preference_vector(batch_size, sample_random, args):
if sample_random:
dist = torch.distributions.uniform.Uniform(MIN_PREFERNCES, MAX_PREFERNCES)
preferences = dist.sample([batch_size, len(args.metrics), 1])
else:
preferences = torch.ones(batch_size, len(args.metrics), 1)
preferences *= torch.tensor(args.fixed_preference).unsqueeze(0).unsqueeze(-1)
preferences = preferences + EPSILON
preferences = (preferences / (preferences).sum(dim=1).unsqueeze(-1))
return preferences.to(args.device)
def normalize_dictionary(dictionary):
'''
Normalizes counts in dictionary
:dictionary: a python dict where each value is a count
:returns: a python dict where each value is normalized to sum to 1
'''
num_samples = sum([dictionary[l] for l in dictionary])
for label in dictionary:
dictionary[label] = dictionary[label]*1. / num_samples
return dictionary
def parse_date(iso_string):
'''
Takes a string of format "YYYY-MM-DD HH:MM:SS" and
returns a corresponding datetime.datetime obj
throws an exception if this can't be done.
'''
try:
return datetime.datetime.strptime(iso_string, ISO_FORMAT)
except Exception as e:
raise Exception(INVALID_DATE_STR.format(iso_string, e))
def md5(key):
'''
returns a hashed with md5 string of the key
'''
return hashlib.md5(key.encode()).hexdigest()
def pad_array_to_length(arr, pad_token, max_length):
arr = arr[:max_length]
return np.array( arr + [pad_token]* (max_length - len(arr)))
def fast_forward_exam_by_one_time_step(curr_exam, NUM_DAYS_IN_TIME_STEP):
exam = deepcopy(curr_exam)
est_date_of_last_followup = curr_exam['date'] + datetime.timedelta(days=int(DAYS_IN_YEAR * curr_exam['years_to_last_followup']))
est_date_of_cancer = curr_exam['date'] + datetime.timedelta(days=int(DAYS_IN_MO * curr_exam['months_to_cancer']))
exam['date'] = curr_exam['date'] + datetime.timedelta(days=int(NUM_DAYS_IN_TIME_STEP))
exam['years_to_last_followup'] = (est_date_of_last_followup - exam['date']).days / DAYS_IN_YEAR
exam['months_to_cancer'] = (est_date_of_cancer - exam['date']).days / DAYS_IN_MO
exam['has_cancer'] = exam['months_to_cancer'] < MIN_MO_TO_CANCER
exam['time_stamp'] = curr_exam['time_stamp'] + 1
return exam
|
[
"adamyala@csail.mit.edu"
] |
adamyala@csail.mit.edu
|
79a50b733533cd6299691b654d5ce900ae38596f
|
de4449e4fbd2972a5a7e775e3a3c7a187ef86899
|
/ubiops/models/pipeline_request_deploment_request.py
|
4cab9039fd3b21f5a7110af758c322d56e311bb0
|
[
"Apache-2.0"
] |
permissive
|
egutierrez-ar/client-library-python
|
03325cc1d4c3e949187889ceb404a08660a7f418
|
94177e5f175263bce645c15a171e54690b1e254f
|
refs/heads/master
| 2023-01-22T23:41:40.274718
| 2020-11-19T07:34:36
| 2020-11-19T07:34:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,840
|
py
|
# coding: utf-8
"""
UbiOps
Client Library to interact with the UbiOps API. # noqa: E501
The version of the OpenAPI document: v2.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ubiops.configuration import Configuration
class PipelineRequestDeplomentRequest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'request_id': 'str',
'pipeline_object': 'str',
'success': 'bool',
'request_data': 'object',
'result': 'object',
'error_message': 'str'
}
attribute_map = {
'request_id': 'request_id',
'pipeline_object': 'pipeline_object',
'success': 'success',
'request_data': 'request_data',
'result': 'result',
'error_message': 'error_message'
}
def __init__(self, request_id=None, pipeline_object=None, success=None, request_data=None, result=None, error_message=None, local_vars_configuration=None): # noqa: E501
"""PipelineRequestDeplomentRequest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._request_id = None
self._pipeline_object = None
self._success = None
self._request_data = None
self._result = None
self._error_message = None
self.discriminator = None
self.request_id = request_id
self.pipeline_object = pipeline_object
self.success = success
self.request_data = request_data
self.result = result
self.error_message = error_message
@property
def request_id(self):
"""Gets the request_id of this PipelineRequestDeplomentRequest. # noqa: E501
:return: The request_id of this PipelineRequestDeplomentRequest. # noqa: E501
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this PipelineRequestDeplomentRequest.
:param request_id: The request_id of this PipelineRequestDeplomentRequest. # noqa: E501
:type: str
"""
self._request_id = request_id
@property
def pipeline_object(self):
"""Gets the pipeline_object of this PipelineRequestDeplomentRequest. # noqa: E501
:return: The pipeline_object of this PipelineRequestDeplomentRequest. # noqa: E501
:rtype: str
"""
return self._pipeline_object
@pipeline_object.setter
def pipeline_object(self, pipeline_object):
"""Sets the pipeline_object of this PipelineRequestDeplomentRequest.
:param pipeline_object: The pipeline_object of this PipelineRequestDeplomentRequest. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and pipeline_object is None: # noqa: E501
raise ValueError("Invalid value for `pipeline_object`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
pipeline_object is not None and len(pipeline_object) < 1):
raise ValueError("Invalid value for `pipeline_object`, length must be greater than or equal to `1`") # noqa: E501
self._pipeline_object = pipeline_object
@property
def success(self):
"""Gets the success of this PipelineRequestDeplomentRequest. # noqa: E501
:return: The success of this PipelineRequestDeplomentRequest. # noqa: E501
:rtype: bool
"""
return self._success
@success.setter
def success(self, success):
"""Sets the success of this PipelineRequestDeplomentRequest.
:param success: The success of this PipelineRequestDeplomentRequest. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and success is None: # noqa: E501
raise ValueError("Invalid value for `success`, must not be `None`") # noqa: E501
self._success = success
@property
def request_data(self):
"""Gets the request_data of this PipelineRequestDeplomentRequest. # noqa: E501
:return: The request_data of this PipelineRequestDeplomentRequest. # noqa: E501
:rtype: object
"""
return self._request_data
@request_data.setter
def request_data(self, request_data):
"""Sets the request_data of this PipelineRequestDeplomentRequest.
:param request_data: The request_data of this PipelineRequestDeplomentRequest. # noqa: E501
:type: object
"""
self._request_data = request_data
@property
def result(self):
"""Gets the result of this PipelineRequestDeplomentRequest. # noqa: E501
:return: The result of this PipelineRequestDeplomentRequest. # noqa: E501
:rtype: object
"""
return self._result
@result.setter
def result(self, result):
"""Sets the result of this PipelineRequestDeplomentRequest.
:param result: The result of this PipelineRequestDeplomentRequest. # noqa: E501
:type: object
"""
self._result = result
@property
def error_message(self):
"""Gets the error_message of this PipelineRequestDeplomentRequest. # noqa: E501
:return: The error_message of this PipelineRequestDeplomentRequest. # noqa: E501
:rtype: str
"""
return self._error_message
@error_message.setter
def error_message(self, error_message):
"""Sets the error_message of this PipelineRequestDeplomentRequest.
:param error_message: The error_message of this PipelineRequestDeplomentRequest. # noqa: E501
:type: str
"""
self._error_message = error_message
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PipelineRequestDeplomentRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PipelineRequestDeplomentRequest):
return True
return self.to_dict() != other.to_dict()
|
[
"sascha.vanweerdenburg@dutchanalytics.com"
] |
sascha.vanweerdenburg@dutchanalytics.com
|
95f6713504bf13a0bf73502b797efe5295597a01
|
bcb71f3ad0196709d462330a60801d5f8ec92ea6
|
/backend/blog/models.py
|
52046c2bc34be06eb996c26a98cfd002333212d7
|
[
"BSD-3-Clause"
] |
permissive
|
lautarianoo/lautacademy
|
c7a7e84958fd6209415c16a0957a7e12449a9afc
|
beec082bdffe8c773fcec51974a687aced278a76
|
refs/heads/master
| 2023-05-28T20:22:22.718962
| 2021-06-11T17:19:32
| 2021-06-11T17:19:32
| 364,965,320
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,201
|
py
|
from ckeditor_uploader.fields import RichTextUploadingField
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils import timezone
from ckeditor.fields import RichTextField
from mptt.models import MPTTModel, TreeForeignKey
from django.dispatch import receiver
from django.db.models.signals import post_save
from backend.utils.transliteration import transliteration_rus_eng
from backend.utils.send_mail import send_mail_user_post
class BlogCategory(MPTTModel):
"""Класс модели категорий сетей"""
name = models.CharField("Категория", max_length=50)
published = models.BooleanField("Опубликовать?", default=True)
parent = TreeForeignKey(
'self',
verbose_name="Родительская категория",
on_delete=models.CASCADE,
null=True,
blank=True,
related_name='children')
slug = models.SlugField(max_length=100, blank=True, null=True, unique=True)
description = models.TextField("Description", max_length=300, default="")
class Meta:
verbose_name = "Категория"
verbose_name_plural = "Категории"
def __str__(self):
return self.name
class Tag(models.Model):
"""Класс модели тегов"""
name = models.CharField("Тег", max_length=50, unique=True, null=True)
slug = models.SlugField(max_length=100, blank=True, null=True)
class Meta:
verbose_name = "Тег"
verbose_name_plural = "Теги"
def __str__(self):
return self.name
class Post(models.Model):
"""Класс модели поста"""
author = models.ForeignKey(
User,
verbose_name="Автор",
on_delete=models.CASCADE)
title = models.CharField("Тема", max_length=500)
mini_text = models.TextField("Краткое содержание", max_length=5000)
text = models.TextField("Полное содержание", max_length=10000000)
created_date = models.DateTimeField("Дата создания", auto_now_add=True)
published_date = models.DateTimeField("Дата публикации", blank=True, null=True)
image = models.ImageField("Изображение", upload_to="blog/", blank=True)
tag = models.ManyToManyField(Tag, verbose_name="Тег", blank=True)
category = models.ForeignKey(
BlogCategory,
verbose_name="Категория",
blank=True,
null=True,
on_delete=models.SET_NULL)
published = models.BooleanField("Опубликовать?", default=True)
viewed = models.IntegerField("Просмотрено", default=0)
slug = models.SlugField(max_length=500, blank=True, null=True, unique=True)
description = models.TextField("Description", max_length=300, default="", null=True)
class Meta:
verbose_name = "Новость"
verbose_name_plural = "Новости"
ordering = ["-created_date"]
def publish(self):
self.published_date = timezone.now()
self.save()
def get_category_description(self):
return self.category.description
def get_absolute_url(self):
return reverse("single_post", kwargs={"category": self.category.slug, "slug": self.slug})
def save(self, *args, **kwargs):
self.slug = transliteration_rus_eng(self.title) + '-' + str(self.id)
super().save(*args, **kwargs)
def __str__(self):
return self.title
class Comment(MPTTModel):
"""Модель коментариев к новостям"""
user = models.ForeignKey(User, verbose_name="Пользователь", on_delete=models.CASCADE)
post = models.ForeignKey(Post, verbose_name="Новость", on_delete=models.CASCADE)
text = models.TextField("Сообщение", max_length=2000)
date = models.DateTimeField("Дата", auto_now_add=True)
update = models.DateTimeField("Изменен", auto_now=True)
parent = TreeForeignKey(
"self",
verbose_name="Родительский комментарий",
on_delete=models.CASCADE,
null=True,
blank=True,
related_name='children')
published = models.BooleanField("Опубликовать?", default=True)
class Meta:
verbose_name = "Комментарий"
verbose_name_plural = "Комментарии"
def __str__(self):
return "{} - {}".format(self.user, self.post)
class SpySearch(models.Model):
"""Модель отслеживания запросов поиска"""
record = models.CharField("Запрос", max_length=1000)
counter = models.PositiveIntegerField("Количество запросов", default=0)
class Meta:
verbose_name = "Запрос"
verbose_name_plural = "Запросы"
def __str__(self):
return "{}".format(self.record)
@receiver(post_save, sender=Post)
def create_user_post(sender, instance, created, **kwargs):
"""Отправка сообщения о предложенной статье на email"""
if created:
send_mail_user_post(instance)
|
[
"neonchick1"
] |
neonchick1
|
4e75a52bbb36cbac6858c29d1ab2d433f1f7071e
|
169d809f45dedcaa3c7b1b49912d8b025abe18d9
|
/challenge251_easy.py
|
5d902f9cf7b2c9832252475e7fc7bf3834a08af4
|
[] |
no_license
|
bermec/challenges
|
8a82d1d38d1ed1a0fc3f258443bc0054efc977a6
|
9fb092f20f12b4eaa808e758f00f482a49346c88
|
refs/heads/master
| 2021-10-08T05:05:56.803332
| 2018-12-08T00:20:20
| 2018-12-08T00:20:20
| 109,448,713
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,253
|
py
|
'''
Nonograms, also known as Hanjie, Picross or Griddlers, are picture logic puzzles in
which cells in a grid must be colored or left blank according to numbers at the
side of the grid to reveal a hidden picture. In this puzzle type, the numbers are a
form of discrete tomography that measures how many unbroken lines of filled-in
squares there are in any given row or column.
In a Nonogram you are given the number of elements in the rows and columns. A row/column
where containing no element has a '0' all other rows/columns will have at least one number.
Each number in a row/column represent sets of elements next to each other.
If a row/column have multiple sets, the declaration of that row/column will have multiple
numbers. These sets will always be at least 1 cell apart.
An example
2 1 1
1 1 1 2 1
2 * *
1 2 * * *
0
2 1 * * *
2 * *
Input description
Today you will receive an image in ASCII with ' ' being empty and '*' being full.
The number of rows and columns will always be a multiple of 5.
*
**
* *
* *
*****
Output description
Give the columns and rows for the input
Columns:
1 1
1 2 1 1 5
Rows:
1
2
1 1
1 1
5
Ins
1
*
/|
/ |
/ |
*---*
2
/\ #
/**\#
/****\
/******\
/--------\
| |
| || # |
| || # |
| || |
*------*
Bonus
Place the columns and rows in a grid like you would give to a puzzler
1 1
1 2 1 1 5
1
2
1 1
1 1
5
'''
pattern = ''' *
/|
/ |
/ |
*---*'''
pattern = pattern.splitlines()
output = []
import re
for x in range(0, len(pattern)):
print()
ans = re.findall('\S\S\S\S\S|\S\S|\S', pattern[x])
temp = []
for item in ans:
len_item = len(item)
temp.append(str(len_item))
output.append(temp)
temp = []
N = len(output)
b = ''
c = []
for lst in output:
for item in lst:
b += item
b = b.rjust(2, ' ')
c.append(b)
b = ''
d = ' '
e = []
# width
M = len(c[0])
for x in range(0, M):
for y in range(0, len(c)):
d += c[y][x] + ' '
e.append(d)
print(d)
d = ' '
for x in range(0, N):
temp = c[x][0]
temp2 = c[x][1]
print(c[x][0], end='')
print('{: >2}'.format(c[x][1]))
|
[
"rog@pynguins.com"
] |
rog@pynguins.com
|
2982b152f2ef0916c17ae223e733483d0f455558
|
fa9cc9cc469a3f0c5bdc0bc4e562dbbd3ff7e465
|
/messages/RequestCloudMessage.py
|
cfd12e7df3126a5a7572dd9613f73bd6e7fc77a3
|
[
"MIT"
] |
permissive
|
zadjii/nebula
|
ddd86ea30791b46b2a1aeb000ae5dfea9a496168
|
50c4ec019c9f7eb15fe105a6c53a8a12880e281c
|
refs/heads/master
| 2021-01-24T17:08:30.607634
| 2018-09-18T00:35:36
| 2018-09-18T00:35:36
| 36,847,552
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 829
|
py
|
# last generated 2016-12-30 19:27:53.981000
from messages import BaseMessage
from msg_codes import REQUEST_CLOUD as REQUEST_CLOUD
__author__ = 'Mike'
class RequestCloudMessage(BaseMessage):
def __init__(self, id=None, cloud_uname=None, cname=None, username=None, passw=None):
super(RequestCloudMessage, self).__init__()
self.type = REQUEST_CLOUD
self.id = id
self.cloud_uname = cloud_uname
self.cname = cname
self.username = username
self.passw = passw
@staticmethod
def deserialize(json_dict):
msg = RequestCloudMessage()
msg.id = json_dict['id']
msg.cloud_uname = json_dict['cloud_uname']
msg.cname = json_dict['cname']
msg.username = json_dict['username']
msg.passw = json_dict['passw']
return msg
|
[
"zadjii@gmail.com"
] |
zadjii@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.