blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1996b3e9a35ebcabee9dfb1f740e7a9d69fd5760 | 0cf3c46500aed987a4760acd4821957c2877c6c9 | /tools/upgrade/ast.py | 6ba70b0ecd977595005abccae9d1a7e02bc69416 | [
"MIT"
] | permissive | ahmed1231234/pyre-check | eca4de113c5346e28c8826a5ff53d3f8ec6c6ddd | aa40c5ea9095eb66ec63c361545b2c51df3e14ff | refs/heads/master | 2022-04-23T11:03:25.798965 | 2020-04-18T18:37:38 | 2020-04-18T18:37:38 | 256,826,159 | 1 | 0 | null | 2020-04-18T18:35:55 | 2020-04-18T18:35:55 | null | UTF-8 | Python | false | false | 1,787 | py | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import ast
import logging
import pathlib
from logging import Logger
from typing import Callable
from pyre_extensions import ListVariadic
Ts = ListVariadic("Ts")
LOG: Logger = logging.getLogger(__name__)
# pyre-fixme[11]: Annotation `Ts` is not defined as a type.
def verify_stable_ast(file_modifier: Callable[[Ts], None]) -> Callable[[Ts], None]:
    """Decorate *file_modifier* so that any edit which changes (or breaks)
    the target file's Python AST is rolled back to the original text."""
    # pyre-fixme[2]: Missing parameter annotation for *args
    def wrapper(filename: str, *args, **kwargs) -> None:
        path = pathlib.Path(filename)
        try:
            # Snapshot the file and its AST before any modification.
            original_text = path.read_text()
            original_tree = ast.parse(original_text)
            # Apply the edit, then re-read whatever is on disk now.
            file_modifier(filename, *args, **kwargs)
            modified_text = path.read_text()
            try:
                modified_tree = ast.parse(modified_text)
                if ast.dump(original_tree) != ast.dump(modified_tree):
                    LOG.warning(
                        "Attempted file changes modified the AST in %s. Undoing.",
                        filename,
                    )
                    path.write_text(original_text)
            except Exception as error:
                # The edit produced unparseable code; restore the snapshot.
                LOG.warning("Could not parse file %s. Undoing.", filename)
                LOG.warning(error)
                path.write_text(original_text)
        except FileNotFoundError:
            LOG.warning("File %s cannot be found, skipping.", filename)
            return
    return wrapper
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
f205428b32562f728bb76ab16080f526548226ee | 7cd6950ab3034cb0cf403ee1b8410bf475360a8d | /venv/bin/schema-salad-tool | e251adb51c1dd93871748bd902611e801bb09b8e | [] | no_license | mr-c/george_murray | ef6d5f77a4f4c0b64cbc64534ce23d7546a3cee0 | 612c68c6b27ed2d8097f1309820ccdbb05530176 | refs/heads/master | 2022-09-20T11:12:58.582547 | 2019-08-15T19:32:34 | 2019-08-15T19:32:34 | 268,844,811 | 0 | 0 | null | 2020-06-02T15:55:27 | 2020-06-02T15:55:26 | null | UTF-8 | Python | false | false | 265 | #!/Users/George1/Documents/GitHub/george_murray/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from schema_salad.main import main
if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".exe" wrapper suffix from argv[0] so
    # the tool reports a clean program name, then exit with main()'s status.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"george.c.murray@maine.edu"
] | george.c.murray@maine.edu | |
fa72589e55f4fe20a47d28a533d5439e1b6ff87c | 2c5b25d0b5d6ba66d013251f93ebf4c642fd787b | /accepted_codes/ZigZag_Conversion/ZigZag Conversion_278768995.py | fb8f43a76f31a8429e8a1102bb46a1a61c7b0a46 | [] | no_license | abhinay-b/Leetcode-Submissions | da8099ac54b5d36ae23db42580064d0f9d9bc63b | d034705813f3f908f555f1d1677b827af751bf42 | refs/heads/master | 2022-10-15T22:09:36.328967 | 2020-06-14T15:39:17 | 2020-06-14T15:39:17 | 259,984,100 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | class Solution:
def convert(self, s: str, rows: int) -> str:
if rows == 1 or len(s) == 0:
return s
substrLists = ["" for i in range(rows)]
pos = 0
step = -1
for char in s:
substrLists[pos] += char
if pos == 0 or pos == rows - 1:
step *= -1
pos += step
result = ""
for subStr in substrLists:
result += subStr
return(result)
| [
"abhinayb.sssihl@gmail.com"
] | abhinayb.sssihl@gmail.com |
ad0fdfb641c6e94049ce76fdcc83f75b4d70cc21 | c8a04384030c3af88a8e16de4cedc4ef8aebfae5 | /stubs/pandas/tests/series/test_internals.pyi | f4ba1fc107185d5351a176a5bf578b51da1198f9 | [
"MIT"
] | permissive | Accern/accern-xyme | f61fce4b426262b4f67c722e563bb4297cfc4235 | 6ed6c52671d02745efabe7e6b8bdf0ad21f8762c | refs/heads/master | 2023-08-17T04:29:00.904122 | 2023-05-23T09:18:09 | 2023-05-23T09:18:09 | 226,960,272 | 3 | 2 | MIT | 2023-07-19T02:13:18 | 2019-12-09T20:21:59 | Python | UTF-8 | Python | false | false | 1,056 | pyi | # Stubs for pandas.tests.series.test_internals (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
# pylint: disable=unused-argument,redefined-outer-name,no-self-use,invalid-name
# pylint: disable=relative-beyond-top-level,line-too-long,arguments-differ
# pylint: disable=no-member,too-few-public-methods,keyword-arg-before-vararg
# pylint: disable=super-init-not-called,abstract-method,redefined-builtin
# Type stub for the pandas Series-internals test suite: bodies are
# intentionally empty ("...") -- only the signatures matter to the checker.
class TestSeriesInternals:
    def test_convert(self) -> None:
        ...
    def test_convert_no_arg_error(self) -> None:
        ...
    def test_convert_preserve_bool(self) -> None:
        ...
    def test_convert_preserve_all_bool(self) -> None:
        ...
    def test_constructor_no_pandas_array(self) -> None:
        ...
    def test_astype_no_pandas_dtype(self) -> None:
        ...
    def test_from_array(self) -> None:
        ...
    def test_from_list_dtype(self) -> None:
        ...
# Module-level test stub (signature only; see class note above in the file).
def test_hasnans_unchached_for_series() -> None:
    ...
# Module-level test stub (signature only).
def test_put_deprecated() -> None:
    ...
| [
"josua.krause@gmail.com"
] | josua.krause@gmail.com |
583c3151de04b103a70ef84b46909cbd243bdf38 | 9a8fe99c7316dfce343be81d2c3c1a6c4f22572c | /set89.py | 98de78eac2b9ee81cbd42fa33eff655c83fe0589 | [] | no_license | Srija-U/codekatabeginner | 5e4d540484529dbafada04d3eac96eab7f98a693 | 8d088e04de1d48d9befb975697e9121f06bb164a | refs/heads/master | 2020-04-30T00:58:51.445394 | 2019-07-01T15:43:05 | 2019-07-01T15:43:05 | 176,516,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | import math
# Read two integers and report whether their product is a perfect square.
l = [int(i) for i in input().split()]
p = l[0] * l[1]
# BUG FIX: math.sqrt() + floor() works in floating point and can misclassify
# large products whose square root is not exactly representable; math.isqrt()
# computes the exact integer square root instead.
r = math.isqrt(p)
if r * r == p:
    print("yes")
else:
    print("no")
| [
"noreply@github.com"
] | Srija-U.noreply@github.com |
c40fdba0ebc728e600f6cb077840adff8ec86a16 | 5dfed5b8fbcb2a62af3aab3beee299a8405ad50b | /ch05-视频/5.VideoCapture.py | 0be5f3af2bce0deef2d782be92053f349c6a26f6 | [
"MIT"
] | permissive | zgle-fork/OpenCV-Python-Tutorial | 7de5a7eda667401b3c7ac0e9306c0b0650bb459f | 5a42b32de208a7f11ec9d04880f4b00e8986a0e5 | refs/heads/master | 2023-03-02T11:01:04.984257 | 2021-02-11T19:10:14 | 2021-02-11T19:10:14 | 268,380,833 | 0 | 0 | MIT | 2020-05-31T23:18:58 | 2020-05-31T23:18:58 | null | UTF-8 | Python | false | false | 2,326 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 3 21:06:22 2014
@author: duan
"""
'''
注意 当你的程序报错时 你 先检查的是你的摄像头是否能够在其他程 序中正常工作 比如 linux 下的 Cheese 。
'''
import numpy as np
import cv2
cap = cv2.VideoCapture(0) # index 0 is the built-in/default camera; use 1, 2, ... for others
'''
你可以使用函数 cap.get(propId) 来获得 的一些参数信息。
propId 可以是 0 到 18 之 的任何整数。
其中的一些值可以使用 cap.set(propId,value) 来修改 value 就是 你想 置成的新值。
例如 我可以使用 cap.get(3) cv2.CAP_PROP_FRAME_WIDTH和 cap.get(4) cv2.CAP_PROP_FRAME_HEIGHT来查看每一帧的宽和高。
默认情况下得到的值是 640X480。但是我可以使用 ret=cap.set(3,320) 和 ret=cap.set(4,240) 来把宽和高改成 320X240。
'''
# (Note above: cap.get(propId)/cap.set(propId, value) read/write camera
# properties 0-18; props 3/4 are frame width/height, default 640x480.)
# ret=cap.set(3,320)
# ret=cap.set(4,240)
# ret = cap.set(cv2.CAP_PROP_FRAME_WIDTH, 480)# keep the frame small to limit computation
# ret = cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 270)#
# Proportional resize: fix the width at 480 and derive a height that keeps
# the camera's native aspect ratio.
frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)# property 4; e.g. 720
frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)# property 3; e.g. 1280
frame_height=int(480/frame_width*frame_height)# e.g. 270
ret = cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_height)# height
ret = cap.set(cv2.CAP_PROP_FRAME_WIDTH, 480)
# while (True):
while cap.isOpened(): # False if the camera failed to initialize; cap.open() could be used then
    # Capture frame-by-frame
    ret, frame = cap.read() # ret is a bool: True when a frame was grabbed
    # print('frame shape:',frame.shape)#(720, 1280, 3)
    frame = cv2.flip(frame, flipCode=1) # mirror left/right -- useful for laptop webcams
    # flipCode: 1 = horizontal flip, 0 = vertical, -1 = both
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Display the resulting frame
    cv2.imshow('frame', gray)
    cv2.setWindowTitle('frame', 'COLOR_BGR2GRAY')
    # Property=cv2.getWindowProperty('frame',0)# not useful
    # if cv2.waitKey(1) & 0xFF == ord('q'):# did not work reliably
    # break
    key = cv2.waitKey(delay=10)
    if key == ord("q"):
        break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| [
"play4fun@foxmail.com"
] | play4fun@foxmail.com |
292ab22fc8c91f9f3a53b729047e4651abdbac4f | 4ad809420a3cd82199b31fcb6033ad6b28c5ac60 | /rustici_engine/models/xapi_interaction_component.py | 85c0f932ab8c6ed4102f496b23b51da596ac6ce0 | [] | no_license | Myagi/python-rustici-engine-api | 2e4eb21f01b156551a1f4d747aea466dec22f30c | 20684845817cb9790b3bfc9be3db515f7ad5b0ee | refs/heads/master | 2022-03-30T12:26:44.825580 | 2020-02-03T06:34:12 | 2020-02-03T06:34:12 | 237,883,063 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,874 | py | # coding: utf-8
"""
Rustici Engine API
Rustici Engine API # noqa: E501
OpenAPI spec version: 2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class XapiInteractionComponent(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> swagger type string, used by to_dict() below.
    swagger_types = {
        'id': 'str',
        'description': 'dict(str, str)'
    }
    # Attribute name -> JSON key in the wire format.
    attribute_map = {
        'id': 'id',
        'description': 'description'
    }
    def __init__(self, id=None, description=None):  # noqa: E501
        """XapiInteractionComponent - a model defined in Swagger"""  # noqa: E501
        self._id = None
        self._description = None
        self.discriminator = None
        # Assigning through the properties runs the setter validation
        # (e.g. id=None raises ValueError below).
        self.id = id
        if description is not None:
            self.description = description
    @property
    def id(self):
        """Gets the id of this XapiInteractionComponent.

        :return: The id of this XapiInteractionComponent.
        :rtype: str
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this XapiInteractionComponent.

        :param id: The id of this XapiInteractionComponent.
        :type: str
        """
        # `id` is a required field in the Swagger spec, hence the None check.
        if id is None:
            raise ValueError("Invalid value for `id`, must not be `None`")  # noqa: E501
        self._id = id
    @property
    def description(self):
        """Gets the description of this XapiInteractionComponent.

        :return: The description of this XapiInteractionComponent.
        :rtype: dict(str, str)
        """
        return self._description
    @description.setter
    def description(self, description):
        """Sets the description of this XapiInteractionComponent.

        :param description: The description of this XapiInteractionComponent.
        :type: dict(str, str)
        """
        self._description = description
    def to_dict(self):
        """Returns the model properties as a dict, recursing into nested
        models (anything exposing its own to_dict)."""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated boilerplate: this class does not subclass dict, so this
        # branch is presumably dead here -- kept as emitted by the generator.
        if issubclass(XapiInteractionComponent, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, XapiInteractionComponent):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"s.kitchell@live.com"
] | s.kitchell@live.com |
312a91bd56204674559e4af96b69bcf70afeae26 | 492e956cbc3f2d9af13b2b437760fba0451c3333 | /setup.py | 95956c2a83a15ec25d433e0f06201a46b22caaa7 | [
"BSD-3-Clause"
] | permissive | Ademan/markupsafe | 40c6deb4b3035df61b65ce293f300d6b8433c045 | 6620b980d299b00d337e998f6cd13a800d51bcf9 | refs/heads/master | 2021-01-18T13:22:21.023640 | 2011-02-17T21:56:32 | 2011-02-17T21:56:32 | 1,380,064 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,104 | py | import os
import sys
from setuptools import setup, Extension, Feature
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsExecError, \
DistutilsPlatformError
# fail safe compilation shamelessly stolen from the simplejson
# setup.py file. Original author: Bob Ippolito
# Optional setuptools Feature: build the C speedups extension when a
# compiler is available; installation falls back to pure Python otherwise.
speedups = Feature(
    'optional C speed-enhancement module',
    standard=True,
    ext_modules = [
        Extension('markupsafe._speedups', ['markupsafe/_speedups.c']),
    ],
)
# Exceptions that signal "the C extension could not be built".
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
if sys.platform == 'win32' and sys.version_info > (2, 6):
    # 2.6's distutils.msvc9compiler can raise an IOError when failing to
    # find the compiler
    ext_errors += (IOError,)
extra = {}
if sys.version_info >= (3, 0):
    # On Python 3 translate the sources with 2to3 during the build.
    extra['use_2to3'] = True
class BuildFailed(Exception):
    # Raised by ve_build_ext when compiling the optional C extension fails,
    # so the caller can retry a pure-Python installation.
    pass
class ve_build_ext(build_ext):
    """This class allows C extension building to fail."""
    def run(self):
        # "No usable compiler on this platform" becomes BuildFailed instead
        # of aborting the whole installation.
        try:
            build_ext.run(self)
        except DistutilsPlatformError:
            raise BuildFailed()
    def build_extension(self, ext):
        # Any compiler/linker error (see ext_errors at module level) also
        # aborts softly via BuildFailed.
        try:
            build_ext.build_extension(self, ext)
        except ext_errors:
            raise BuildFailed()
def echo(msg=''):
    """Write *msg* (default: an empty line) to stdout, newline-terminated."""
    line = msg + '\n'
    sys.stdout.write(line)
# Long description for PyPI, read from the README next to this setup.py.
readme = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
def run_setup(with_binary):
    """Invoke setuptools.setup() for MarkupSafe.

    When *with_binary* is true the C speedups Feature is included, which may
    raise BuildFailed (via ve_build_ext) if no compiler is available.
    """
    features = {}
    if with_binary:
        features['speedups'] = speedups
    setup(
        name='MarkupSafe',
        version='0.10',
        url='http://dev.pocoo.org/',
        license='BSD',
        author='Armin Ronacher',
        author_email='armin.ronacher@active-4.com',
        description='Implements a XML/HTML/XHTML Markup safe string for Python',
        long_description=readme,
        zip_safe=False,
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'Environment :: Web Environment',
            'Intended Audience :: Developers',
            'License :: OSI Approved :: BSD License',
            'Operating System :: OS Independent',
            'Programming Language :: Python',
            'Programming Language :: Python :: 3',
            'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
            'Topic :: Software Development :: Libraries :: Python Modules',
            'Topic :: Text Processing :: Markup :: HTML'
        ],
        packages=['markupsafe'],
        test_suite='markupsafe.tests.suite',
        include_package_data=True,
        cmdclass={'build_ext': ve_build_ext},
        features=features,
        **extra
    )
# First try a full build with the C extension; if compilation fails, warn
# and retry as a pure-Python install so `pip install` never hard-fails here.
try:
    run_setup(True)
except BuildFailed:
    LINE = '=' * 74
    BUILD_EXT_WARNING = 'WARNING: The C extension could not be compiled, speedups are not enabled.'
    echo(LINE)
    echo(BUILD_EXT_WARNING)
    echo('Failure information, if any, is above.')
    echo('Retrying the build without the C extension now.')
    echo()
    run_setup(False)
    echo(LINE)
    echo(BUILD_EXT_WARNING)
    echo('Plain-Python installation succeeded.')
    echo(LINE)
| [
"armin.ronacher@active-4.com"
] | armin.ronacher@active-4.com |
2be1f29cb247b9a78bccf284d46cc677e921eb76 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/missing_data/test_missing_data_air_passengers_None_Median.py | 2a181eddd4b0f9ad5e0f81313d275c09db13c348 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 128 | py | import tests.missing_data.test_missing_data_air_passengers_generic as gen
# Run the shared air-passengers missing-data scenario with no fill value
# (None) and the 'Median' imputation method.
gen.test_air_passengers_missing_data(None, 'Median')
| [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
3a9d64a9ea107918ee80b2d18620eca72ba779b1 | 76c3262a1ed4e6cbbf5008e8fc79c917035fe181 | /src/mysite/posts/views.py | b12464ced838be501e90822580b6b41435a34213 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-newlib-historical",
"OpenSSL",
"bzip2-1.0.6",
"Python-2.0",
"TCL",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft"
] | permissive | aiegoo/django | ba6ee6be02b4e0fdf2fce007cb3ef739974ade35 | 2f508a318edd26403509a61eb44e99fda8b7ed64 | refs/heads/master | 2023-01-12T21:56:16.202262 | 2021-06-08T09:27:44 | 2021-06-08T09:27:44 | 211,546,261 | 0 | 0 | MIT | 2022-12-26T20:15:56 | 2019-09-28T18:41:35 | Tcl | UTF-8 | Python | false | false | 164 | py | from django.http import HttpRequest
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
# Create your views here.
def post_home(request):
    """Render a trivial landing page for the posts app."""
    # BUG FIX: the original returned HttpRequest("<h1>hello</h1>").
    # HttpRequest is the *incoming* request type and its constructor takes no
    # arguments, so that line raised TypeError at runtime; a Django view must
    # return an HttpResponse.
    return HttpResponse("<h1>hello</h1>")
"eozz21@gmail.com"
] | eozz21@gmail.com |
ae88ca665498f4b7533b6dac23ecbf987436a17f | 52b79e4cd1e26969a3ebb3bca8620519071bea98 | /answers/17_serialization/task_17_3.py | f5210b54e2ac9f160565ac6b077cafd54537708e | [] | no_license | hariram32/pyneng-answers-en | 631bc149b8a219a2de86de82681ffba3d1ff30ee | 84b7240b00d3a4ab9011952db662f716d1cd31b8 | refs/heads/main | 2023-03-16T00:12:38.954431 | 2021-03-09T15:40:10 | 2021-03-09T15:40:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,543 | py | # -*- coding: utf-8 -*-
"""
Task 17.3
Create a function parse_sh_cdp_neighbors that processes the output of
the show cdp neighbors command.
The function expects, as an argument, the output of the command
as a single string (not a filename).
The function should return a dictionary that describes the connections between devices.
For example, if the following output was passed as an argument:
R4>show cdp neighbors
Device ID Local Intrfce Holdtme Capability Platform Port ID
R5 Fa 0/1 122 R S I 2811 Fa 0/1
R6 Fa 0/2 143 R S I 2811 Fa 0/0
The function should return a dictionary like this:
{'R4': {'Fa 0/1': {'R5': 'Fa 0/1'},
'Fa 0/2': {'R6': 'Fa 0/0'}}}
Interfaces must be written with a space: "Fa 0/0", not "Fa0/0".
Check the function on the contents of the sh_cdp_n_sw1.txt file
"""
import re
def parse_sh_cdp_neighbors(command_output):
    """Parse "show cdp neighbors" output (one string) into a topology dict:
    {local_device: {local_intf: {remote_device: remote_intf}}}."""
    # The local hostname comes from the CLI prompt, e.g. "R4>" or "R4#".
    local_device = re.search(r"(\S+)[>#]", command_output).group(1)
    neighbor_line = re.compile(
        r"(?P<r_dev>\w+) +(?P<l_intf>\S+ \S+)"
        r" +\d+ +[\w ]+ +\S+ +(?P<r_intf>\S+ \S+)"
    )
    neighbors = {}
    for found in neighbor_line.finditer(command_output):
        local_intf = found.group("l_intf")
        neighbors[local_intf] = {found.group("r_dev"): found.group("r_intf")}
    return {local_device: neighbors}
if __name__ == "__main__":
    # Demo: parse the sample switch output shipped next to this script.
    with open("sh_cdp_n_sw1.txt") as f:
        print(parse_sh_cdp_neighbors(f.read()))
| [
"nataliya.samoylenko@gmail.com"
] | nataliya.samoylenko@gmail.com |
23e96be12a0904803e10a2ada162a77caaea4993 | 1850d2222b504bfa3c2390dc6fc186b3260334d0 | /src/pbhla/dictionary.py | 32ab51481614f77e383c8f8ee6393b79722894c4 | [] | no_license | la0hu2006/HlaTools | 2f6964645615c6c35517f064111b362407701dea | 082e45cd1cbdad941f0df59d71a0ca59bfdabf0c | refs/heads/master | 2021-01-18T21:42:31.258878 | 2016-08-29T18:50:11 | 2016-08-29T18:50:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,290 | py | import os, re, csv, logging
from pbcore.io.FastaIO import FastaReader
from pbhla.io.BlasrIO import BlasrReader, record_to_string
from pbhla.io.SamIO import SamReader
from pbhla.utils import get_base_sequence_name
log = logging.getLogger()
def create_amp_assem_reference( m1_file, reference=None ):
    """Map each query id in a Blasr M1 file to its locus (parsed from the
    query name), or to that locus' entry in *reference* when one is given.
    Raises KeyError on duplicate query ids."""
    log.info('Parsing Blasr M1 results from "{0}"'.format( m1_file ))
    results = {}
    for record in BlasrReader( m1_file ):
        qname = get_base_sequence_name( record.qname )
        if qname in results:
            msg = 'Duplicate sequence ids found! "{0}"'.format( qname )
            log.info( msg )
            raise KeyError( msg )
        # Locus is the second underscore-separated field of the query name.
        locus = qname.split('_')[1]
        results[qname] = reference[locus] if reference else locus
    log.info('Finished reading Blasr results')
    return results
def create_m1_reference( m1_file, reference=None ):
    """Map each query name in a Blasr M1 file to its target name, or to that
    target's entry in *reference* when one is given.  Raises KeyError on
    duplicate query ids."""
    log.info('Parsing Blasr M1 results from "{0}"'.format( m1_file ))
    results = {}
    for record in BlasrReader( m1_file ):
        qname = get_base_sequence_name( record.qname )
        tname = get_base_sequence_name( record.tname )
        if qname in results:
            msg = 'Duplicate sequence ids found! "{0}"'.format( qname )
            log.info( msg )
            raise KeyError( msg )
        results[qname] = reference[tname] if reference else tname
    log.info('Finished reading Blasr results')
    return results
def create_m5_reference( m5_file ):
    """For each query in a Blasr M5 file, keep the target of its best
    alignment (fewest mismatches + insertions + deletions)."""
    log.info('Parsing Blasr M5 results from "{0}"'.format( m5_file ))
    results = {}
    best_diffs = {}
    for record in BlasrReader( m5_file ):
        qname = get_base_sequence_name( record.qname )
        tname = get_base_sequence_name( record.tname )
        diff_count = int(record.nmis) + int(record.nins) + int(record.ndel)
        # First alignment seen for this query, or strictly better than the
        # current best -- ties keep the earlier record.
        if qname not in best_diffs or best_diffs[qname] > diff_count:
            results[qname] = tname
            best_diffs[qname] = diff_count
    log.info('Finished reading Blasr results')
    return results
def create_sam_reference( sam_file, reference=None ):
    """Map each query in a SAM file to its reference sequence name, or to
    that name's entry in *reference* when one is given.  Raises KeyError on
    duplicate query ids."""
    log.info('Parsing SAM alignments from "{0}"'.format(sam_file))
    results = {}
    for record in SamReader(sam_file):
        name = get_base_sequence_name( record.rname )
        if record.qname in results:
            msg = 'Duplicate sequence ids found! "{0}"'.format( record.qname )
            log.info( msg )
            raise KeyError( msg )
        results[record.qname] = reference[name] if reference else name
    log.info('Finished reading SAM file results')
    return results
def create_phased_reference( phased_fofn ):
    """Map every FASTA record listed in a phased FOFN to the consensus contig
    name derived from its file name."""
    log.info('Parsing Phased FOFN alignments from "{0}"'.format(phased_fofn))
    results = {}
    with open(phased_fofn, 'r') as handle:
        for line in handle:
            fasta_path = line.strip()
            # Contig name comes from the file name, e.g. "foo.fasta" -> "foo_cns"
            prefix = os.path.basename( fasta_path ).split('.')[0]
            contig_name = prefix + '_cns'
            for record in FastaReader(fasta_path):
                results[record.name.split()[0]] = contig_name
    log.info('Finished reading phased FOFN results')
    return results
def filter_m5_file( m5_file, filtered_file ):
    """
    Filter an M5 alignment file to contain only the alignments with the fewest diffs

    For each query, the record with the fewest mismatches + insertions +
    deletions is kept (ties keep the earlier record) and written to
    *filtered_file*, one record per line.
    """
    log.info('Filtering Blasr M5 results from "{0}"'.format( m5_file ))
    selected = {}
    diffs = {}
    count = 0
    for record in BlasrReader( m5_file ):
        count += 1
        diff_count = int(record.nmis) + int(record.nins) + int(record.ndel)
        if record.qname not in diffs or diffs[record.qname] > diff_count:
            selected[record.qname] = record
            diffs[record.qname] = diff_count
    # BUG FIX: the two counts were swapped -- the message reported the total
    # alignment count as "records" and the selected count as "alignments".
    log.info('Selected %s records from %s alignments' % (len(selected), count))
    with open( filtered_file, 'w' ) as output:
        for record in selected.itervalues():
            output.write('%s\n' % record_to_string( record ))
    log.info('Finished filtering Blasr results')
| [
"bbowman@pacificbiosciences.com"
] | bbowman@pacificbiosciences.com |
b36a85f206bf2bb3cab6829401349d38f3b51bd1 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D98B/SAFHAZD98BUN.py | a4eef18c6de9d10ed241bd2e4d7eeacdbcf968ba | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 2,765 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD98BUN import recorddefs
# EDIFACT D.98B SAFHAZ (safety and hazard data) message structure.
# Each dict names a segment ID with its min/max occurrence counts; LEVEL
# nests the segment groups.  Consumed by the bots translator at run time.
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
    {ID: 'BGM', MIN: 1, MAX: 1},
    {ID: 'RFF', MIN: 0, MAX: 10},
    {ID: 'DTM', MIN: 0, MAX: 10},
    {ID: 'FTX', MIN: 0, MAX: 99},
    {ID: 'NAD', MIN: 0, MAX: 10, LEVEL: [
        {ID: 'RFF', MIN: 0, MAX: 10},
        {ID: 'CTA', MIN: 0, MAX: 10, LEVEL: [
            {ID: 'COM', MIN: 0, MAX: 5},
        ]},
    ]},
    {ID: 'DOC', MIN: 1, MAX: 1000, LEVEL: [
        {ID: 'IMD', MIN: 0, MAX: 999},
        {ID: 'PIA', MIN: 0, MAX: 10},
        {ID: 'MEA', MIN: 0, MAX: 10},
        {ID: 'RCS', MIN: 0, MAX: 10},
        {ID: 'RFF', MIN: 0, MAX: 10},
        {ID: 'DTM', MIN: 0, MAX: 10},
        {ID: 'FTX', MIN: 0, MAX: 99},
        {ID: 'NAD', MIN: 0, MAX: 10, LEVEL: [
            {ID: 'RFF', MIN: 0, MAX: 10},
            {ID: 'CTA', MIN: 0, MAX: 10, LEVEL: [
                {ID: 'COM', MIN: 0, MAX: 5},
            ]},
        ]},
        {ID: 'SFI', MIN: 0, MAX: 20, LEVEL: [
            {ID: 'PIA', MIN: 0, MAX: 10},
            {ID: 'EQD', MIN: 0, MAX: 99},
            {ID: 'FTX', MIN: 0, MAX: 99},
            {ID: 'NAD', MIN: 0, MAX: 10, LEVEL: [
                {ID: 'RFF', MIN: 0, MAX: 10},
                {ID: 'CTA', MIN: 0, MAX: 10, LEVEL: [
                    {ID: 'COM', MIN: 0, MAX: 5},
                ]},
            ]},
            {ID: 'HAN', MIN: 0, MAX: 10, LEVEL: [
                {ID: 'FTX', MIN: 0, MAX: 99},
            ]},
            {ID: 'IMD', MIN: 0, MAX: 999, LEVEL: [
                {ID: 'MEA', MIN: 0, MAX: 10},
                {ID: 'PCD', MIN: 0, MAX: 10},
                {ID: 'RFF', MIN: 0, MAX: 10},
                {ID: 'FTX', MIN: 0, MAX: 99},
            ]},
            {ID: 'DGS', MIN: 0, MAX: 10, LEVEL: [
                {ID: 'RFF', MIN: 0, MAX: 10},
                {ID: 'FTX', MIN: 0, MAX: 99},
                {ID: 'PAC', MIN: 0, MAX: 10, LEVEL: [
                    {ID: 'PCI', MIN: 0, MAX: 10},
                ]},
            ]},
            {ID: 'CCI', MIN: 0, MAX: 999, LEVEL: [
                {ID: 'IMD', MIN: 0, MAX: 10},
                {ID: 'FTX', MIN: 0, MAX: 99},
                {ID: 'CAV', MIN: 0, MAX: 10},
                {ID: 'RFF', MIN: 0, MAX: 10, LEVEL: [
                    {ID: 'DTM', MIN: 0, MAX: 10},
                ]},
                {ID: 'MEA', MIN: 0, MAX: 10, LEVEL: [
                    {ID: 'TEM', MIN: 0, MAX: 10},
                    {ID: 'DTM', MIN: 0, MAX: 10},
                    {ID: 'RFF', MIN: 0, MAX: 10},
                    {ID: 'FTX', MIN: 0, MAX: 99},
                ]},
            ]},
        ]},
    ]},
    {ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
3857b3e7f9b4eeb9078b49a3de87dabf2611ef10 | bb300c03d353e7752d20909995d2b0e4818d785d | /app1/admin.py | ca0089e5fcc6dd1e454695c7fe3e0900521046e2 | [] | no_license | ksuvarna85/django_unicode | 3fe3340b63e92812c17a379a934fc8d1d3fc91bc | 79d833a3009acf034a194a8daa71b6e8a209f748 | refs/heads/master | 2022-12-13T00:10:59.156715 | 2020-09-12T14:35:53 | 2020-09-12T14:35:53 | 292,186,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import Group
from django.contrib.auth.forms import UserCreationForm
from .models import User, Student, Teacher
# Register your models here.
class BaseUserAdmin(UserAdmin):
    # Admin for the custom User model.  The stock UserAdmin fieldsets/filters
    # are cleared because this model does not use the default auth fields.
    list_display = ['email', 'is_admin', ]
    search_fields = ("email", 'sap_id', 'is_admin', )
    # Timestamps are managed by the model, not editable in the admin.
    readonly_fields = (
        'date_joined',
        'last_login',
    )
    filter_horizontal =()
    list_filter = ()
    fieldsets = ()
class StudentAdmin(UserAdmin):
    # Changelist columns for Student; inherited UserAdmin layout is cleared.
    list_display = ('email','sap_id', 'year')
    filter_horizontal = ()
    list_filter = ()
    fieldsets = ()
class TeacherAdmin(UserAdmin):
    # Changelist columns for Teacher; inherited UserAdmin layout is cleared.
    list_display = ('email', 'qualification', )
    filter_horizontal = ()
    list_filter = ()
    fieldsets = ()
class StudentDisplayAdmin(UserAdmin):
    # Alternate Student admin with ordering/search configured.
    # NOTE(review): this class is never passed to admin.site.register below --
    # confirm whether it is unused or intended for Student.
    list_display = ['email', 'sap_id', ]
    ordering = ['email']
    search_fields = ['email']
    filter_horizontal = ()
    list_filter = ()
    fieldsets = ()
# Replace the default auth admin: hide Groups and expose the custom models.
admin.site.unregister(Group)
admin.site.register(User, BaseUserAdmin)
admin.site.register(Student, StudentAdmin)
admin.site.register(Teacher, TeacherAdmin)
| [
"you@example.com"
] | you@example.com |
b3ae963aeaacd1244bd96e68dca519a28aa5a5d2 | 525c6a69bcf924f0309b69f1d3aff341b06feb8e | /sunyata/backend/pytorch/layer/dot/conv.py | 3c37be58bd4b04de352c0ba95e80282519e3fdcb | [] | no_license | knighton/sunyata_2017 | ba3af4f17184d92f6277d428a81802ac12ef50a4 | 4e9d8e7d5666d02f9bb0aa9dfbd16b7a8e97c1c8 | refs/heads/master | 2021-09-06T13:19:06.341771 | 2018-02-07T00:28:07 | 2018-02-07T00:28:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,550 | py | from torch.nn import functional as F
from ....base.layer.dot.conv import BaseConvAPI
class PyTorchConvAPI(BaseConvAPI):
    """PyTorch implementation of the convolution portion of the backend API."""
    def __init__(self):
        BaseConvAPI.__init__(self)
        # Dispatch table: spatial ndim (input ndim minus batch and channel
        # dims) -> the matching convolution method.
        self._ndim2conv = {
            1: self.conv1d,
            2: self.conv2d,
            3: self.conv3d,
        }
    def conv(self, x, kernel, bias, stride, pad, dilation):
        # x is assumed to be (batch, channels, *spatial) -- the spatial rank
        # selects conv1d/conv2d/conv3d.
        ndim = x.dim() - 2
        return self._ndim2conv[ndim](x, kernel, bias, stride, pad, dilation)
    def _conv(self, func_name, x, kernel, bias, stride, pad, dilation):
        """Shared driver: normalize stride/pad/dilation, apply any explicit
        pre-padding, then call the named torch.nn.functional conv."""
        ndim = self.ndim(x) - 2
        # Kernel spatial extent ("face"), skipping out/in channel dims.
        face = self.shape(kernel)[2:]
        stride = self.to_shape(stride, ndim)
        dilation = self.to_shape(dilation, ndim)
        # Split the requested padding into an explicit pre-pad plus the
        # per-dimension padding torch's conv functions accept.
        # NOTE(review): exact split semantics live in
        # BaseConvAPI.unpack_conv_pad_to_singles -- confirm there.
        pre_pad, conv_singles_pad = \
            self.unpack_conv_pad_to_singles(face, pad, dilation)
        func = getattr(F, func_name)
        if ndim == 1:
            # conv1d takes scalars, not length-1 tuples.
            stride, = stride
            conv_singles_pad, = conv_singles_pad
            dilation, = dilation
        if pre_pad is not None:
            # Asymmetric padding is applied up front with constant zeros.
            x = self.constant_pad(x, pre_pad, 0)
        return func(x, kernel, bias, stride, conv_singles_pad, dilation)
    def conv1d(self, x, kernel, bias, stride, pad, dilation):
        # 1-D convolution: x is (batch, channels, length).
        return self._conv('conv1d', x, kernel, bias, stride, pad, dilation)
    def conv2d(self, x, kernel, bias, stride, pad, dilation):
        # 2-D convolution: x is (batch, channels, height, width).
        return self._conv('conv2d', x, kernel, bias, stride, pad, dilation)
    def conv3d(self, x, kernel, bias, stride, pad, dilation):
        # 3-D convolution: x is (batch, channels, depth, height, width).
        return self._conv('conv3d', x, kernel, bias, stride, pad, dilation)
| [
"iamknighton@gmail.com"
] | iamknighton@gmail.com |
cea44ccf0270fc4975dcc281347d17019d96aa29 | 2a68ce2f0f47370e2f57b9279cc8e1aab85e26da | /trojsten/results/migrations/0001_squashed_0003_auto_20160608_1143.py | b621f0ef934d8daff3195390d0dd3e897f207a6d | [
"MIT"
] | permissive | trojsten/web | 52007c3d575b21603bf205c1e7294a482eedbf85 | 97b7b3ae3ac46be786bde9c49a2cae6609dbf50f | refs/heads/master | 2023-08-17T23:30:16.857469 | 2023-07-30T16:31:34 | 2023-07-30T16:31:34 | 10,618,952 | 6 | 10 | MIT | 2023-09-04T19:09:09 | 2013-06-11T10:04:10 | Python | UTF-8 | Python | false | false | 5,589 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-08 10:11
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Squashed initial migration for the ``results`` app.

    Creates the FrozenPoints, FrozenResults and FrozenUserResult models
    (auto-generated by Django 1.9.7; verbose names are Slovak UI labels
    and must not be altered here).
    """
    initial = True
    dependencies = [
        ("people", "0001_initial"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("contests", "0003_category_task"),
        ("schools", "0001_initial"),
    ]
    operations = [
        # Per-task frozen point values referenced by FrozenUserResult.
        migrations.CreateModel(
            name="FrozenPoints",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
                    ),
                ),
                (
                    "description_points",
                    models.CharField(max_length=10, verbose_name="body za popis"),
                ),
                ("source_points", models.CharField(max_length=10, verbose_name="body za program")),
                ("sum", models.CharField(max_length=10, verbose_name="body")),
                (
                    "task",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="contests.Task",
                        verbose_name="\xfaloha",
                    ),
                ),
            ],
            options={
                "verbose_name": "Zmrazen\xe9 body za \xfalohu",
                "verbose_name_plural": "Zmrazen\xe9 body za \xfalohy",
            },
        ),
        # A frozen (snapshotted) results table for one round/category.
        migrations.CreateModel(
            name="FrozenResults",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
                    ),
                ),
                (
                    "is_single_round",
                    models.BooleanField(verbose_name="vynecha\u0165 predo\u0161l\xe9 kol\xe1"),
                ),
                (
                    "has_previous_results",
                    models.BooleanField(
                        default=False, verbose_name="zah\u0155\u0148a predo\u0161l\xe9 kol\xe1"
                    ),
                ),
                ("time", models.DateTimeField(auto_now_add=True, verbose_name="\u010das")),
                (
                    "category",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="contests.Category",
                        verbose_name="kateg\xf3ria",
                    ),
                ),
                (
                    "round",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="contests.Round",
                        verbose_name="kolo",
                    ),
                ),
            ],
            options={
                "verbose_name": "Zmrazen\xe1 v\xfdsledkovka",
                "verbose_name_plural": "Zmrazen\xe9 v\xfdsledkovky",
            },
        ),
        # One contestant's row within a FrozenResults snapshot.
        migrations.CreateModel(
            name="FrozenUserResult",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
                    ),
                ),
                ("rank", models.IntegerField(verbose_name="poradie")),
                ("prev_rank", models.IntegerField(blank=True, null=True, verbose_name="poradie")),
                ("fullname", models.CharField(max_length=500, verbose_name="pln\xe9 meno")),
                ("school_year", models.IntegerField(verbose_name="ro\u010dn\xedk")),
                (
                    "previous_points",
                    models.CharField(
                        max_length=10, verbose_name="body z predo\u0161l\xfdch k\xf4l"
                    ),
                ),
                ("sum", models.CharField(max_length=10, verbose_name="suma")),
                (
                    "frozenresults",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="results.FrozenResults",
                        verbose_name="v\xfdsledkovka",
                    ),
                ),
                (
                    "original_user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        verbose_name="p\xf4vodn\xfd pou\u017e\xedvate\u013e",
                    ),
                ),
                (
                    "school",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="schools.School",
                        verbose_name="\u0161kola",
                    ),
                ),
                (
                    "task_points",
                    models.ManyToManyField(
                        to="results.FrozenPoints", verbose_name="body za \xfalohy"
                    ),
                ),
            ],
            options={
                "verbose_name": "Zmrazen\xfd v\xfdsledok",
                "verbose_name_plural": "Zmrazen\xe9 v\xfdsledky",
            },
        ),
    ]
| [
"mhozza@gmail.com"
] | mhozza@gmail.com |
ef337ec276cc312703505f50f3e225a52769011e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02233/s579323796.py | 2066403e585a327111189ca533edc9056ec5aec9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | from functools import lru_cache
def fib(n):
    """Return the n-th Fibonacci number with fib(0) == fib(1) == 1.

    Iterative on purpose: the original memoized recursion still builds a
    call chain ~n deep, so it raised RecursionError for n near the
    interpreter's recursion limit (~1000).
    """
    a, b = 1, 1
    for _ in range(n):
        a, b = b, a + b
    return a


if __name__ == '__main__':
    # Read n from stdin and print fib(n), as the original script did.
    print(fib(int(input())))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
24191139d0b94f103b412d4ed31f6b47eb63484e | df4a7c46c46d1eca6570493b9707bdf64e54f8d3 | /py/35.search-insert-position.py | 33dd984473f03e45395deae0415f989a0023fd70 | [] | no_license | CharmSun/my-leetcode | 52a39bf719c507fb7032ed424fe857ba7340aea3 | 5325a56ba8c40d74d9fef2b19bac63a4e2c44a38 | refs/heads/master | 2023-03-29T06:39:49.614264 | 2021-03-28T16:33:52 | 2021-03-28T16:33:52 | 261,364,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | #
# @lc app=leetcode id=35 lang=python3
#
# [35] Search Insert Position
#
# @lc code=start
from typing import List
## Binary search: return the leftmost (lower-bound) position
class Solution:
    def searchInsert(self, nums: List[int], target: int) -> int:
        """Return the index of target in sorted nums, or the index at
        which it would be inserted to keep nums sorted.

        Classic lower-bound binary search over the half-open range
        [lo, hi); O(log n).
        """
        lo, hi = 0, len(nums)
        while lo < hi:
            mid = (lo + hi) // 2
            if nums[mid] < target:
                lo = mid + 1
            else:
                hi = mid
        return lo
# @lc code=end
| [
"suncan0812@gmail.com"
] | suncan0812@gmail.com |
6a9004fa89106410d1390d87e6956d939af9fc41 | e6d4a87dcf98e93bab92faa03f1b16253b728ac9 | /algorithms/python/imageSmoother/imageSmoother.py | 710c11b1f7cfe46788763934ba78cc33142fdb3e | [] | no_license | MichelleZ/leetcode | b5a58e1822e3f6ef8021b29d9bc9aca3fd3d416f | a390adeeb71e997b3c1a56c479825d4adda07ef9 | refs/heads/main | 2023-03-06T08:16:54.891699 | 2023-02-26T07:17:47 | 2023-02-26T07:17:47 | 326,904,500 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Source: https://leetcode.com/problems/image-smoother/
# Author: Miao Zhang
# Date: 2021-02-26
class Solution:
    def imageSmoother(self, M: List[List[int]]) -> List[List[int]]:
        """Return a matrix where each cell is the floor of the average
        of itself and its up-to-8 in-bounds neighbours."""
        rows, cols = len(M), len(M[0])
        smoothed = [[0] * cols for _ in range(rows)]
        for r in range(rows):
            for c in range(cols):
                total = 0
                count = 0
                # Clamp the 3x3 window to the matrix bounds.
                for nr in range(max(0, r - 1), min(rows, r + 2)):
                    for nc in range(max(0, c - 1), min(cols, c + 2)):
                        total += M[nr][nc]
                        count += 1
                smoothed[r][c] = total // count
        return smoothed
| [
"zhangdaxiaomiao@163.com"
] | zhangdaxiaomiao@163.com |
bb049e65d59a06ac07b31316aaa604e7231f451e | 0f9a97d48a9f0179bcf1e3d80c08340096eb561e | /ДЗ-10. Словари/E. Самое частое слово.py | 5ab4d26f6bf5e6e5c8e4cc2561b81c51baac99e4 | [] | no_license | dmitryokh/python | 96d8ec8c3f2d3428b90d510a1003aecf102b13d0 | 8efe761412779bed9a7516832d3152843088fa43 | refs/heads/master | 2020-04-24T03:08:42.865813 | 2019-02-20T11:41:52 | 2019-02-20T11:41:52 | 171,661,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | input = open('input.txt', 'r')
text = input.readline()
wordcount = {}
maxword = 0
while text != "":
text = text.split()
for word in text:
if word in wordcount:
wordcount[word] += 1
else:
wordcount[word] = 1
if wordcount[word] > maxword:
maxword = wordcount[word]
text = input.readline()
for word in sorted(wordcount):
if wordcount[word] == maxword:
print(word)
break
| [
"noreply@github.com"
] | dmitryokh.noreply@github.com |
5080faaeaef838a6382729f2b935ab97aebfee4a | 6efc62bc1aa82e09eb1740cb795ddb622d9069a1 | /config/migrations/0002_auto_20181224_2241.py | 740333528a41b62ca7887d99c9f83b889f1f49cc | [] | no_license | EruDev/typeidea | c1147743a2d062cb2b21c7bf98db9377345809ef | 8c8889ac75bd09298b93528d618fdffdae93ea03 | refs/heads/master | 2020-04-12T04:20:06.123801 | 2018-12-28T01:11:27 | 2018-12-28T01:11:27 | 147,651,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | # Generated by Django 2.0.4 on 2018-12-24 22:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: switch SideBar.display_type to a
    PositiveIntegerField with Chinese choice labels (1 = show, 0 = hide)."""
    dependencies = [
        ('config', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='sidebar',
            name='display_type',
            field=models.PositiveIntegerField(choices=[(1, '展示'), (0, '隐藏')], default=1, verbose_name='展示类型'),
        ),
    ]
| [
"1027926875@qq.com"
] | 1027926875@qq.com |
dca16d10dcceb0ac60b9e637079684ea037c63f1 | a9510540f25112a13a7b10772d8b12df4f80fcf8 | /edsys_sequence/ir_sequence.py | e2b4ddb1b45cec29ac47cbe95b79b227cfb1cc46 | [] | no_license | babarlhr/edsys10 | c291b7eae643bbd25c961e829beca9f5b108845e | 84b43d0ed19145c88fa142e6cf1fa691fa9fedce | refs/heads/master | 2022-03-15T03:03:27.479767 | 2019-11-20T21:22:50 | 2019-11-20T21:22:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,634 | py | from eagle import models, fields, api, _
class ir_sequence(models.Model):
    """Extension of ``ir.sequence`` (Eagle/Odoo-style ORM) that can
    back-fill a chosen sequence field on existing records of a chosen model.

    NOTE(review): each button press consumes new sequence numbers via
    ``self._next()`` for every matching record — presumably intended as a
    one-off back-fill tool; confirm before exposing widely.
    """
    _inherit = 'ir.sequence'
    # Target model whose records will receive generated numbers.
    model_id = fields.Many2one("ir.model", 'Model')
    #field_id = fields.Many2one('ir.model.fields', 'Field', domain="[('model_id', '=', model_id), ('ttype', '=', 'integer')]")
    # Field on the target model that the generated sequence is written to.
    field_id = fields.Many2one('ir.model.fields', 'Field', domain="[('model_id', '=', model_id)]")
    @api.multi
    def generate_sequence_button(self):
        """Write the next sequence value into ``field_id`` for every
        not-yet-finalized record of the selected model (the state filter
        differs per model, hard-coded below)."""
        if self.model_id.model == 'registration' :
            reg_ids = self.env['registration'].search([('state','!=', 'done')])
            for reg_id in reg_ids :
                number_seq = {self.field_id.name : self._next()}
                reg_id.write(number_seq)
        if self.model_id.model == 'account.voucher' :
            vocuher_ids = self.env['account.voucher'].search([('state','=', 'draft')])
            for vocuher_id in vocuher_ids :
                number_seq = {self.field_id.name : self._next()}
                vocuher_id.write(number_seq)
        if self.model_id.model == 'account.invoice' :
            invoice_ids = self.env['account.invoice'].search([('state','not in', ('paid', 'cancel'))])
            for invoice_id in invoice_ids :
                number_seq = {self.field_id.name : self._next()}
                invoice_id.write(number_seq)
        if self.model_id.model == 're.reg.waiting.responce.parents' :
            re_reg_ids = self.env['re.reg.waiting.responce.parents'].search([('state','not in', ('re_registration_confirmed', 'tc_expected'))])
            for re_reg_id in re_reg_ids :
                number_seq = {self.field_id.name : self._next()}
                re_reg_id.write(number_seq)
        if self.model_id.model == 'trensfer.certificate' :
            tc_ids = self.env['trensfer.certificate'].search([('state','not in', ('tc_complete', 'tc_cancel'))])
            for tc_id in tc_ids :
                number_seq = {self.field_id.name : self._next()}
                tc_id.write(number_seq)
        if self.model_id.model == 'hr.employee' :
            emp_ids = self.env['hr.employee'].search([('employee_state','in', ('probation', 'employee'))])
            for emp_id in emp_ids :
                #if emp_id.employee_code :
                #    emp_id.biometric_id = emp_id.employee_code
                #number_seq = {self.field_id.name : self._next()}
                #emp_id.write(number_seq)
                # Employees only get a number when they don't have one yet.
                if not emp_id.employee_code :
                    number_seq = {self.field_id.name : self._next()}
                    emp_id.write(number_seq)
| [
"rapidgrps@princegroup-bd.com"
] | rapidgrps@princegroup-bd.com |
f83fcd905c084cce98fc955ccf1e309f691d2eec | fb78fd824e904705fb1ee09db8b3c20cc3902805 | /python-scripts/points.py | b8ee9310c52b85f1fc806c4acc7ef631e09df80e | [] | no_license | Roderich25/mac | 8469833821ac49c539a744db29db5a41d755ad55 | 4f7fe281c88f0199b85d0ac99ce41ffb643d6e82 | refs/heads/master | 2023-01-12T05:55:12.753209 | 2021-11-26T01:16:24 | 2021-11-26T01:16:24 | 207,029,750 | 0 | 0 | null | 2023-01-07T11:49:23 | 2019-09-07T21:51:53 | Jupyter Notebook | UTF-8 | Python | false | false | 2,073 | py | #!/usr/bin/env python3
from random import randint
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import cm
def set_direction():
    """Return a random (dx, dy) step with each component in {-1, 0, 1},
    rejecting (0, 0) so the point always moves."""
    dx = dy = 0
    while (dx, dy) == (0, 0):
        dx, dy = randint(-1, 1), randint(-1, 1)
    return dx, dy
class Point:
    """A point bouncing inside the rectangle x in [-6, 6], y in [-3, 3].

    Every visited position is recorded in ``array`` as three parallel
    lists [xs, ys, colors]; once the point has moved ``m`` times, its
    recorded colour switches to "blue".
    """

    def __init__(self, x, y, m, color):
        self.x = x
        self.y = y
        self.n = 0          # number of moves performed so far
        self.m = m          # move count after which the colour turns blue
        self.color = color
        self.dir_x, self.dir_y = set_direction()
        self.array = [[self.x], [self.y], [self.color]]

    def move(self):
        """Advance one step, reflecting off the walls, record the new
        state and return self."""
        self.x += self.dir_x
        self.y += self.dir_y
        self.n += 1
        if self.x in (-6, 6):
            self.dir_x = -self.dir_x
        if self.y in (-3, 3):
            self.dir_y = -self.dir_y
        self.array[0].append(self.x)
        self.array[1].append(self.y)
        recorded = "blue" if self.n >= self.m else self.color
        self.array[2].append(recorded)
        return self

    def move_n(self, n):
        """Apply move() n times; returns self for chaining."""
        for _ in range(n):
            self.move()
        return self

    def __str__(self):
        return f"Point({self.x},{self.y}) #{self.n}"
# Simulate 10 bouncing points for 30 steps each and animate the result.
sim = []
for i in enumerate(range(10)):
    # i is an (index, value) pair; i[0] is the point's index 0..9.
    # Point i turns blue after 3*i moves; colours alternate red/green.
    p = Point(randint(-5, 5), randint(-2, 2), i[0] * 3, (5 * ["red", "green"])[i[0]],)
    s = p.move_n(30).array
    sim.append(s)
# data shape: (10 points, 3 rows [xs, ys, colors], 31 recorded states).
data = np.array(sim)
fig = plt.figure()
ax = fig.add_subplot(111)
plt.xlim(-6, 6)
plt.ylim(-3, 3)
# Hide all axis ticks and labels.
plt.tick_params(
    axis="both",
    which="both",
    bottom=False,
    top=False,
    labelbottom=False,
    right=False,
    left=False,
    labelleft=False,
)
# One scatter artist per animation frame.
ims = []
for i, num in enumerate(range(0, 30)):
    x, y, c = [], [], []
    for point in data[:, :3, num]:
        # np.array stringified everything; convert coordinates back to int.
        x.append(int(point[0]))
        y.append(int(point[1]))
        c.append(point[2])
    print(i, x, y, c)
    scat = ax.scatter(x=x, y=y, s=50, c=c)
    ims.append([scat])
im_ani = animation.ArtistAnimation(fig, ims, interval=800, repeat_delay=300, blit=True)
plt.show()
| [
"rodrigoavilasolis@gmail.com"
] | rodrigoavilasolis@gmail.com |
a9d5499f5da02efa73b4381ff413faa0cdab877e | 98a03799f45384e1bc88674d4fc1a7b14b997673 | /cart/migrations/0005_auto_20210525_1933.py | e3bde4f7811053bc4e318bc7d665d7bc1b2b5638 | [] | no_license | munyuaDeveloper/07Ecommerce-backend | cda868260b56b044bdecf595b25492f5a3711e5d | cc73f4d33a7d6cdb823e9a719375f4c57ac49872 | refs/heads/main | 2023-08-11T19:42:42.169703 | 2021-09-21T22:58:27 | 2021-09-21T22:58:27 | 366,691,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | # Generated by Django 3.2.3 on 2021-05-25 19:33
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename ShoppingCartItem's verbose names
    and drop the unused OrderInfo.order_description field."""
    dependencies = [
        ('cart', '0004_auto_20210525_1218'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='shoppingcartitem',
            options={'verbose_name': 'shopping cart Items', 'verbose_name_plural': 'shopping cart Items'},
        ),
        migrations.RemoveField(
            model_name='orderinfo',
            name='order_description',
        ),
    ]
| [
"mwangyjose@gmail.com"
] | mwangyjose@gmail.com |
f1ce3d0ba8ae07110c346dda91edaa7356e11913 | d2df82be0a37c9fde9a1ecee34fbf311fd8b2840 | /awesome_gans/sagan/sagan_train.py | 3fc74c4963e2f4e90b5b14d0b029222310ce5c31 | [
"MIT"
] | permissive | qqyouhappy/Awesome-GANs | a13ded69a043bc257966fcd3e71dc7a87b3f524f | 0f01852abbac0497baa8cc309a580ba720c0478f | refs/heads/master | 2022-12-22T14:42:56.765953 | 2020-10-05T14:53:01 | 2020-10-05T14:53:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,134 | py | import os
import time
import numpy as np
import tensorflow as tf
import awesome_gans.image_utils as iu
import awesome_gans.sagan.sagan_model as sagan
from awesome_gans.config import parse_args
from awesome_gans.datasets import CelebADataSet as DataSet
from awesome_gans.datasets import DataIterator
# Command-line / config options for this training script.
cfg = parse_args()
# Training hyper-parameters used by main().
train_step = {
    'epochs': 11,
    'batch_size': 64,
    'global_step': 10001,
    'logging_interval': 500,
}
def main():
    """Train a SAGAN on 128x128 CelebA images (TensorFlow 1.x).

    Loads the dataset (from raw images or a pre-built h5 file), restores
    the latest checkpoint if present, then alternates D/G updates; every
    ``logging_interval`` steps it logs losses, writes TensorBoard
    summaries, saves sample images and checkpoints the model.
    """
    start_time = time.time() # Clocking start
    height, width, channel = 128, 128, 3
    # loading CelebA DataSet # from 'raw images' or 'h5'
    use_h5 = True
    if not use_h5:
        # Build the h5 cache from the raw aligned-images folder.
        ds = DataSet(
            height=height,
            width=height,
            channel=channel,
            # ds_image_path="D:\\DataSet/CelebA/CelebA-%d.h5" % height,
            ds_label_path=os.path.join(cfg.celeba_path, "Anno/list_attr_celeba.txt"),
            ds_image_path=os.path.join(cfg.celeba_path, "Img/img_align_celeba/"),
            ds_type="CelebA",
            use_save=True,
            save_file_name=os.path.join(cfg.celeba_path, "CelebA-%d.h5" % height),
            save_type="to_h5",
            use_img_scale=False,
        )
    else:
        # Load the pre-built h5 cache directly.
        ds = DataSet(
            height=height,
            width=height,
            channel=channel,
            ds_image_path=os.path.join(cfg.celeba_path, "CelebA-%d.h5" % height),
            ds_label_path=os.path.join(cfg.celeba_path, "Anno/list_attr_celeba.txt"),
            # ds_image_path=os.path.join(cfg.celeba, "Img/img_align_celeba/"),
            ds_type="CelebA",
            use_save=False,
            # save_file_name=os.path.join(cfg.celeba, "CelebA-%d.h5" % height),
            # save_type="to_h5",
            use_img_scale=False,
        )
    num_images = ds.num_images
    # saving sample images
    test_images = np.reshape(iu.transform(ds.images[:16], inv_type='127'), (16, height, width, channel))
    iu.save_images(test_images, size=[4, 4], image_path=os.path.join(cfg.output_path, "sample.png"), inv_type='127')
    ds_iter = DataIterator(x=ds.images, y=None, batch_size=train_step['batch_size'], label_off=True)
    # Free the DataSet wrapper; the iterator keeps its own reference.
    del ds
    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as s:
        # SAGAN Model
        model = sagan.SAGAN(
            s,
            height=height,
            width=width,
            channel=channel,
            batch_size=train_step['batch_size'],
            use_gp=False,
            use_hinge_loss=True,
        )
        # Initializing
        s.run(tf.global_variables_initializer())
        print("[*] Reading checkpoints...")
        saved_global_step = 0
        ckpt = tf.train.get_checkpoint_state(cfg.model_path)
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint; step number is encoded in the path.
            model.saver.restore(s, ckpt.model_checkpoint_path)
            saved_global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print("[+] global step : %d" % saved_global_step, " successfully loaded")
        else:
            print('[-] No checkpoint file found')
        global_step = saved_global_step
        start_epoch = global_step // (num_images // model.batch_size) # recover n_epoch
        ds_iter.pointer = saved_global_step % (num_images // model.batch_size) # recover n_iter
        for epoch in range(start_epoch, train_step['epochs']):
            for batch_x in ds_iter.iterate():
                batch_x = iu.transform(batch_x, inv_type='127')
                batch_x = np.reshape(batch_x, (model.batch_size, model.height, model.width, model.channel))
                batch_z = np.random.uniform(-1.0, 1.0, [model.batch_size, model.z_dim]).astype(np.float32)
                # Update D network
                _, d_loss = s.run([model.d_op, model.d_loss], feed_dict={model.x: batch_x, model.z: batch_z, })
                # Update G network
                _, g_loss = s.run([model.g_op, model.g_loss], feed_dict={model.x: batch_x, model.z: batch_z, })
                if global_step % train_step['logging_interval'] == 0:
                    summary = s.run(model.merged, feed_dict={model.x: batch_x, model.z: batch_z, })
                    # Training G model with sample image and noise
                    sample_z = np.random.uniform(-1.0, 1.0, [model.sample_num, model.z_dim]).astype(np.float32)
                    samples = s.run(model.g_test, feed_dict={model.z_test: sample_z, })
                    # is_mean, is_std = t.inception_score(iu.inverse_transform(samples, inv_type='127'))
                    # fid_score = t.fid_score(real_img=batch_x, fake_img=samples[:model.batch_size])
                    # Print loss
                    print(
                        "[+] Epoch %04d Step %08d => " % (epoch, global_step),
                        " D loss : {:.8f}".format(d_loss),
                        " G loss : {:.8f}".format(g_loss),
                        # " Inception Score : {:.2f} (±{:.2f})".format(is_mean, is_std),
                        # " FID Score : {:.2f}".format(fid_score)
                    )
                    # Summary saver
                    model.writer.add_summary(summary, global_step)
                    # Export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    # NOTE(review): `cfg.output` here vs `cfg.output_path`
                    # above — one of the two attribute names is likely
                    # wrong; confirm against config.parse_args().
                    sample_dir = os.path.join(cfg.output, 'train_{:08d}.png'.format(global_step))
                    # Generated image save
                    iu.save_images(
                        samples, size=[sample_image_height, sample_image_width], image_path=sample_dir, inv_type='127'
                    )
                    # Model save
                    model.saver.save(s, os.path.join(cfg.model_path, "SAGAN.ckpt"), global_step)
                global_step += 1
        end_time = time.time() - start_time # Clocking end
        # Elapsed time
        print("[+] Elapsed time {:.8f}s".format(end_time))
        # Close tf.Session
        s.close()
| [
"kozistr@gmail.com"
] | kozistr@gmail.com |
eacad1766cda661fcba77d56519716da6ba9aea3 | 0c325cf7a68ef51067ed8db566d525a20de5b635 | /other/panda365/panda365/pd/conf/tests/test_api.py | bce8a29c452848ac5f2021879686f02beb3367f5 | [] | no_license | alinzel/NOTES | 2ab6aa1ef1d601a9ae8c0d23c0df2bca7e1aa241 | 3e0594641a605580e920d0b08a251fbc99f34e2f | refs/heads/master | 2023-01-08T22:48:30.762625 | 2020-01-17T09:14:47 | 2020-01-17T09:14:47 | 175,339,492 | 0 | 0 | null | 2022-12-27T15:01:19 | 2019-03-13T03:28:08 | HTML | UTF-8 | Python | false | false | 535 | py | from pd.test_utils import assert_dict_like
from pd.conf.factory import ConfFactory
def test_get(client, db_session):
conf = ConfFactory()
url_tpl = '/v1/conf/{}'
resp = client.get(url_tpl.format(conf.name))
assert resp.status_code == 200
assert_dict_like(resp.json, {
'name': conf.name,
'min_version': conf.min_version,
'latest_version': conf.latest_version,
'description': conf.description,
})
resp = client.get(url_tpl.format('blah'))
assert resp.status_code == 404
| [
"944951481@qq.com"
] | 944951481@qq.com |
dc177bf4ac10bb80f0ea2fae215f680358e4ea80 | 1da3173e935cb6d32ec3d9da7bf01ee91e6c3199 | /sbt/utils/enumerators.py | 02d6047c598a13a4f10128b8ac8b4229579a8cdb | [
"Apache-2.0"
] | permissive | PgBiel/sbt | 492b99c254bda9b404a89c438b3e3f0f13f358b7 | dfbca913751b13a251e335e271bee0e443d02afe | refs/heads/master | 2022-01-23T09:59:19.289276 | 2019-06-06T00:32:20 | 2019-06-06T00:32:20 | 197,852,145 | 0 | 0 | null | 2019-07-19T22:54:27 | 2019-07-19T22:54:27 | null | UTF-8 | Python | false | false | 1,030 | py | """
/utils/enumeration.py
Copyright (c) 2019 ShineyDev
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__authors__ = [("shineydev", "contact@shiney.dev")]
__maintainers__ = [("shineydev", "contact@shiney.dev")]
__version_info__ = (2, 0, 0, "alpha", 0)
__version__ = "{0}.{1}.{2}{3}{4}".format(*[str(n)[0] if (i == 3) else str(n) for (i, n) in enumerate(__version_info__)])
__all__ = {
"RPS",
}
class RPS():
    # Emoji used to render each rock-paper-scissors move.
    r = "\N{MOYAI}"            # rock
    p = "\N{PAGE FACING UP}"   # paper
    s = "\N{BLACK SCISSORS}"   # scissors
"contact@shiney.dev"
] | contact@shiney.dev |
d3d27b0c4ef0f3a4e0890dfd6b88de514f40610e | 4dc4345cca9c5f452bf4b87263505ee6b4e960af | /text_processing_exercise/letters_change_numbers.py | 82fa25b63154fba7ad44fd25241f820d90ab2dd8 | [] | no_license | ivan-yosifov88/python_fundamentals | 88c7eb5167bbe6692b95051d1551496a84893524 | 1cfe6d18453362fc26be984f6cb871b9d7dec63d | refs/heads/master | 2023-03-29T16:46:55.363035 | 2021-04-07T10:39:44 | 2021-04-07T10:39:44 | 341,604,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 779 | py | alphabet_dict = {}
for index in range(1, 27):
letter = 96 + index
alphabet_dict[chr(letter)] = int(index)
words_to_manipulate = input().split()
total_sum = 0
for word in words_to_manipulate:
first_letter = word[:1]
second_letter = word[-1:]
number = int(word[1:-1])
if first_letter.isupper():
divider = alphabet_dict[first_letter.lower()]
total_sum += number / divider
elif first_letter.islower():
multiplier = alphabet_dict[first_letter]
total_sum += number * multiplier
if second_letter.isupper():
subtract = alphabet_dict[second_letter.lower()]
total_sum -= subtract
elif second_letter.islower():
add = alphabet_dict[second_letter]
total_sum += add
print(f"{total_sum:.2f}") | [
"ivan.yosifov88gmail.com"
] | ivan.yosifov88gmail.com |
f20c7d5f767aae7d5913374a5a2ba3591d11f2dd | 0e5aa2f88c770457e91289aa886c24d7faca0677 | /viewport.py | 601c4c33f5bc469663ed2cc056d465161ccebe44 | [] | no_license | su8/pygobject-examples | 54ea6dbb894ef735f87703ce60a6b5bdbad8c3e9 | 27c3f94b21a731628dac13bb0ad573474864839b | refs/heads/master | 2021-04-03T09:39:05.614517 | 2016-11-12T20:55:12 | 2016-11-12T20:55:12 | 124,675,038 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | #!/usr/bin/env python3
from gi.repository import Gtk
window = Gtk.Window()
window.connect("destroy", lambda q: Gtk.main_quit())
grid = Gtk.Grid()
window.add(grid)
viewport = Gtk.Viewport()
viewport.set_size_request(200, 200)
grid.attach(viewport, 0, 0, 1, 1)
vadjustment = viewport.get_vadjustment()
hadjustment = viewport.get_hadjustment()
vscrollbar = Gtk.VScrollbar()
grid.attach(vscrollbar, 1, 0, 1, 1)
hscrollbar = Gtk.HScrollbar()
grid.attach(hscrollbar, 0, 1, 1, 1)
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
viewport.add(box)
for i in range(0, 15):
button = Gtk.Button(label="Button %s" % i)
box.pack_start(button, True, True, 0)
window.show_all()
Gtk.main() | [
"a@b.c"
] | a@b.c |
f220eafb22d10d6f90076ad75da2189397169500 | 2f8f8171b3b996b0c866ede72367ec26f64eae39 | /sampleproject/www/Project_Euler/problem001_050/problem024.py | 4002e5d9acde3b0609b412965436d2994b511a5a | [] | no_license | kabaksh0507/exercise_python_it-1 | da46edce09301b03a5351ee1885fb01eb69d8240 | 2b6c80a79494c9981e51bd03696c3aa19d6625ec | refs/heads/main | 2023-03-04T03:12:44.188468 | 2021-02-08T08:55:36 | 2021-02-08T08:55:36 | 337,014,697 | 0 | 0 | null | 2021-02-08T08:57:30 | 2021-02-08T08:57:30 | null | UTF-8 | Python | false | false | 2,561 | py | '''Project Euler Problem 24'''
def problem_24() -> str:
'''Lexicographic permutations'''
target_num = 1000000
pattern_list = list(range(10))
ret = search_pattern(target_num, pattern_list)
return ret
def search_pattern(target_num: int, select_list: list) -> str:
    '''
    Return the target_num-th (1-based, lexicographic) permutation of the
    elements of select_list as a string; each element is used exactly once.

    Since the order is lexicographic, smaller values occupy the more
    significant positions first.  The leading element is found by seeing
    how many times the number of permutations of the remaining elements,
    (len-1)!, fits into target_num; the function then recurses on the
    remaining elements with the correspondingly reduced target.

    e.g. for the list [0, 1, 2] and target 4:
        the trailing two positions allow 2! = 2 permutations, and 4 needs
        two such groups, so the leading digit is the list's 2nd value (1).

    Notes (translated from the original author): select_list is consumed
    (elements are popped) during the search; a target_num of 0 or less
    behaves the same as 1.
    '''
    if len(select_list) == 0:
        # Recursive base case: no choices left, stop.
        return ''
    if target_num > number_kai(len(select_list)):
        # No target_num-th pattern exists (exceeds the total permutation count).
        return 'out of range : target number'
    select_list.sort()
    next_keta_pattern_num = number_kai(len(select_list)-1)
    select_num = ''
    for i in range(len(select_list)):
        if target_num <= next_keta_pattern_num*(i+1):
            select_num = str(select_list.pop(i))
            return select_num + search_pattern(target_num - next_keta_pattern_num*i, select_list)
    return 'Error'
def number_kai(num: int) -> int:
    '''
    Return num! = num * (num-1) * ... * 1, with number_kai(0) == 1.
    '''
    product = 1
    for factor in range(2, num + 1):
        product *= factor
    return product
return ret
if __name__ == '__main__':
print(problem_24())
| [
"kazkitou9080@gmail.com"
] | kazkitou9080@gmail.com |
bafe1e52daa3fcf78f8cf84a110dea1f2ed01737 | 3dcfa266c4b7321a4c3a224b98f9ca0dff891e47 | /archives/prioque.py | 869bdbf0ed3ff28d32db9ff399a170e2b25b858f | [] | no_license | CandyTt20/Notes | a2ef681d123c5219a29334e99aeb900b74bf1834 | ec092f881122ebdd91ef9764ec7ce4d9cc4723ae | refs/heads/master | 2022-08-21T18:08:33.204223 | 2020-05-19T23:55:49 | 2020-05-19T23:55:49 | 255,209,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | class PrioQue(object):
#! 线性表实现
def __init__(self, elist=[]):
self._elist = list(elist)
self._elist.sort(reverse=True)
def insert_que(self, elem):
i = len(self._elist) - 1
while i >= 0 and self._elist[i] < elem:
i -= 1
self._elist.insert(i + 1, elem)
def is_empty(self):
if len(self._elist) > 0:
return False
else:
return True
def peek(self):
if self.is_empty():
return None
else:
return self._elist[len(self._elist)-1]
def pop_que(self):
if self.is_empty():
return None
else:
return self._elist.pop()
# Demo: drain the queue in ascending order, showing the backing list
# (sorted descending) after each pop.
x = PrioQue([5, 1, 2, 6, 3])
x.insert_que(4)
while x._elist:
    print(x.pop_que())
    print(x._elist)
| [
"458566293@qq.com"
] | 458566293@qq.com |
b971c99a94802b5ef22c261ffe180a7af8d278a1 | 8924bd3df018bdee62a5e5d99069171771d9f459 | /Important/aws-tutorial-code/lambda/lambda_read_pdf_s3_trigger.py | 8e80689c37c5b36f7db72cf78b0876ed8c6100cf | [
"MIT"
] | permissive | abhi15sep/Lambda-Final | f5985dc1d2aef4965764ec452a5e2949d78a202b | fed8be8ddeb7325e594fb426bfcd0f388f3d0f67 | refs/heads/master | 2023-04-19T02:06:09.808966 | 2021-05-02T21:23:22 | 2021-05-02T21:23:22 | 336,569,860 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | #-*- coding: utf-8 -*-
__author__ = "Chirag Rathod (Srce Cde)"
__license__ = "MIT"
__email__ = "chiragr83@gmail.com"
__maintainer__ = "Chirag Rathod (Srce Cde)"
import json
import boto3
import fitz
def lambda_handler(event, context):
    """AWS Lambda entry point triggered by an S3 event: downloads the
    uploaded PDF, extracts its text with PyMuPDF (fitz) and prints it.

    Always returns a 200 response, even when `event` is empty/falsy and
    nothing was processed.
    """
    # boto3 client
    s3 = boto3.client("s3")
    if event:
        # Only the first record is processed; S3 "ObjectCreated" events
        # normally carry exactly one record.
        file_obj = event["Records"][0]
        # fetching bucket name from event
        bucketname = str(file_obj['s3']['bucket']['name'])
        # fetching file name from event
        # NOTE(review): S3 event keys arrive URL-encoded; keys containing
        # spaces or special characters likely need
        # urllib.parse.unquote_plus here — confirm.
        filename = str(file_obj['s3']['object']['key'])
        # retrieving object from S3
        fileObj = s3.get_object(Bucket=bucketname, Key=filename)
        # reading botocore stream
        file_content = fileObj["Body"].read()
        # loading pdf from memory/stream
        with fitz.open(stream=file_content, filetype="pdf") as doc:
            text = ""
            # iterating through pdf file pages
            for page in doc:
                # fetching & appending text to text variable of each page
                text += page.getText()
            print(text)
    return {
        'statusCode': 200,
        'body': json.dumps('Thanks from Srce Cde!')
    }
| [
"abhaypratap3536@gmail.com"
] | abhaypratap3536@gmail.com |
b0c708b3b1fb2ed265dffef8b360333c76f2466a | 36b75aac4236e928e22552e8812abd45d32aecf1 | /modules/dbnd/test_dbnd/tracking/callable_tracking/test_no_side_affects.py | 22f25cdb1e1c99a40b646c2ce2536d287438fd2d | [
"Apache-2.0"
] | permissive | reloadbrain/dbnd | 7793aa1864f678005de626068b0ac9361d637d65 | ec0076f9a142b20e2f7afd886ed1a18683c553ec | refs/heads/master | 2023-09-01T08:04:09.486666 | 2021-10-14T16:43:00 | 2021-10-14T16:43:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,701 | py | import pickle
from pytest import fixture
from dbnd import config, task
from dbnd._core.configuration.environ_config import get_max_calls_per_func
@task
def task_pass_through_result_param(result):
    """Tracked task that echoes its string argument unchanged."""
    assert isinstance(result, str)
    return str(result)
@task
def task_pass_through_args_kwargs(*args, **kwargs):
    """Tracked task that returns its positional and keyword arguments as-is."""
    return {
        "args": args,
        "kwargs": kwargs,
    }
@task
def task_in_conf(param1="default_value", param2=None):
    """Tracked task with defaults, used to verify config values do not
    leak into tracked calls."""
    return param1, param2
class TestNoSideAffectsOnTracking(object):
    """In tracking mode, @task functions must behave exactly like plain
    Python functions: arguments pass through untouched, config does not
    override parameters, tasks stay picklable, and only a bounded number
    of calls per function is actually tracked."""
    @fixture(autouse=True)
    def _tracking_context(self, set_tracking_context):
        # Forces every test in this class to run inside a tracking context.
        pass
    def test_tracking_pass_through_result_param(self, pandas_data_frame_on_disk):
        df, df_file = pandas_data_frame_on_disk
        assert task_pass_through_result_param(result=str(df_file)) == str(df_file)
    def test_tracking_pass_through_args_kwargs(self, pandas_data_frame_on_disk):
        df, df_file = pandas_data_frame_on_disk
        res = task_pass_through_args_kwargs(str(df_file), data=df, result=str(df_file))
        assert res["args"] == (str(df_file),)
        assert len(res["kwargs"]) == 2
        # Identity check: tracking must not copy/replace the DataFrame.
        assert res["kwargs"]["data"] is df
        assert res["kwargs"]["result"] == str(df_file)
    def test_partial_params(self):
        param1, param2 = task_in_conf(param2="param2_value")
        assert param1 == "default_value"
        assert param2 == "param2_value"
    def test_task_in_conf(self):
        # in_conf - shouldn't affect anything
        with config(
            {"task_in_conf": {"param1": "conf_value", "param2": "conf_value"}},
            source="test_source",
        ):
            param1, param2 = task_in_conf(param2="param2_value")
            assert param1 == "default_value"
            assert param2 == "param2_value"
    def test_pickle(self):
        # A tracked task must survive a pickle round-trip unchanged.
        pickled = pickle.dumps(task_pass_through_args_kwargs)
        assert task_pass_through_args_kwargs == pickle.loads(pickled)
    def test_tracking_limit(self, mock_channel_tracker):
        @task
        def inc_task(x):
            return x + 1
        max_calls_allowed = get_max_calls_per_func()
        extra_func_calls = 10
        n = 0
        for i in range(max_calls_allowed + extra_func_calls):
            n = inc_task(n)
        # ensure that function was actually invoked all the times (max_calls_allowed + extra_func_calls)
        assert max_calls_allowed + extra_func_calls == n
        # check that there was only max_calls_allowed "tracked" calls
        track_call = [
            x
            for x in mock_channel_tracker.call_args_list
            if x.args[0].__name__ == "log_targets"
        ]
        assert max_calls_allowed == len(track_call)
| [
"roman.slipchenko@databand.ai"
] | roman.slipchenko@databand.ai |
40354a15c13bfb900f01fee589091789eb0e071f | 6c58da2c54a3d35273e7984313d181f1da9981fc | /Users/djangoEnv/bin/easy_install-2.7 | d2fc7d4f26da40fa01f2c42ae4b7550560e15d0e | [
"MIT-0"
] | permissive | py1-10-2017/rgero215_PY1-10-2017 | e582cb12cc63f84b1c0c14d09a922cb6cb228016 | f455b335ec9c8c850571f3a75dcd95759b4cfdad | refs/heads/master | 2021-09-04T03:23:48.062326 | 2018-01-14T21:07:26 | 2018-01-14T21:07:26 | 105,612,652 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | 7 | #!/Users/RGero13/Desktop/rgero215_PY1-10-2017/Users/djangoEnv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
# Console-script shim (generated by setuptools): normalize argv[0] by stripping
# the "-script.py(w)" / ".exe" wrapper suffix, then delegate to easy_install's main().
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"rgero215@gmail.com"
] | rgero215@gmail.com |
9b3c07180d0aae51da6beadf9de05ef72f4b3789 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-live/aliyunsdklive/request/v20161101/DeleteLiveStreamRecordIndexFilesRequest.py | b699537542cfd3d96cc4b06bcf2359fa98da73b4 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 2,480 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklive.endpoint import endpoint_data
class DeleteLiveStreamRecordIndexFilesRequest(RpcRequest):
	"""RPC request wrapper for the ApsaraVideo Live API
	`DeleteLiveStreamRecordIndexFiles` (API version 2016-11-01)."""
	def __init__(self):
		RpcRequest.__init__(self, 'live', '2016-11-01', 'DeleteLiveStreamRecordIndexFiles','live')
		self.set_method('POST')
		# Endpoint data is only attached when the installed core SDK exposes these hooks.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	# Generated getter/setter pairs: each one reads/writes a single query parameter.
	def get_RemoveFile(self): # String
		return self.get_query_params().get('RemoveFile')
	def set_RemoveFile(self, RemoveFile): # String
		self.add_query_param('RemoveFile', RemoveFile)
	def get_AppName(self): # String
		return self.get_query_params().get('AppName')
	def set_AppName(self, AppName): # String
		self.add_query_param('AppName', AppName)
	def get_StreamName(self): # String
		return self.get_query_params().get('StreamName')
	def set_StreamName(self, StreamName): # String
		self.add_query_param('StreamName', StreamName)
	def get_DomainName(self): # String
		return self.get_query_params().get('DomainName')
	def set_DomainName(self, DomainName): # String
		self.add_query_param('DomainName', DomainName)
	def get_OwnerId(self): # Long
		return self.get_query_params().get('OwnerId')
	def set_OwnerId(self, OwnerId): # Long
		self.add_query_param('OwnerId', OwnerId)
	def get_RecordIds(self): # RepeatList
		return self.get_query_params().get('RecordId')
	def set_RecordIds(self, RecordId): # RepeatList
		# Repeated values are flattened to numbered keys: RecordId.1, RecordId.2, ...
		for depth1 in range(len(RecordId)):
			self.add_query_param('RecordId.' + str(depth1 + 1), RecordId[depth1])
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
a632af37ea6591032897d65fc5beb33456c14888 | 6b7f11270232000042d51f97f616c1a19c047389 | /elepay/model/location_dto.pyi | a1dccde82442a223f0ad9e4dac8562e6718ab2f1 | [] | no_license | elestyle/elepay-python-sdk | 07e6bc8d0a42b5217a4144ab5632b0cc548aef58 | 36dd8502047df4c6f8eaba53e216bae25843c3c5 | refs/heads/master | 2023-02-04T08:47:15.287825 | 2023-01-24T02:28:45 | 2023-01-24T02:28:45 | 206,721,107 | 2 | 0 | null | 2023-01-24T02:15:52 | 2019-09-06T05:41:33 | Python | UTF-8 | Python | false | false | 4,612 | pyi | # coding: utf-8
"""
elepay API リファレンス
elepay APIはRESTをベースに構成された決済APIです。支払い処理、返金処理など、決済に関わる運用における様々なことができます。 # noqa: E501
The version of the OpenAPI document: 1.2.0
Contact: support@elestyle.jp
Generated by: https://openapi-generator.tech
"""
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from elepay import schemas # noqa: F401
class LocationDto(
    schemas.DictSchema
):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Location object
    """
    class MetaOapg:
        # Schema metadata: every declared property of this DTO is a plain string schema.
        class properties:
            id = schemas.StrSchema
            name = schemas.StrSchema
            country = schemas.StrSchema
            description = schemas.StrSchema
            logoUrl = schemas.StrSchema
            __annotations__ = {
                "id": id,
                "name": name,
                "country": country,
                "description": description,
                "logoUrl": logoUrl,
            }
    # Typed overloads so type checkers resolve instance["<known key>"] to its schema type.
    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ...
    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ...
    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["country"]) -> MetaOapg.properties.country: ...
    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["description"]) -> MetaOapg.properties.description: ...
    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["logoUrl"]) -> MetaOapg.properties.logoUrl: ...
    @typing.overload
    def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
    def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "name", "country", "description", "logoUrl", ], str]):
        # dict_instance[name] accessor
        return super().__getitem__(name)
    # get_item_oapg: like __getitem__, but typed (per the overloads) to possibly
    # return schemas.Unset for absent keys.
    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> typing.Union[MetaOapg.properties.id, schemas.Unset]: ...
    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> typing.Union[MetaOapg.properties.name, schemas.Unset]: ...
    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["country"]) -> typing.Union[MetaOapg.properties.country, schemas.Unset]: ...
    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["description"]) -> typing.Union[MetaOapg.properties.description, schemas.Unset]: ...
    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["logoUrl"]) -> typing.Union[MetaOapg.properties.logoUrl, schemas.Unset]: ...
    @typing.overload
    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "name", "country", "description", "logoUrl", ], str]):
        return super().get_item_oapg(name)
    def __new__(
        cls,
        *args: typing.Union[dict, frozendict.frozendict, ],
        # `id` mirrors the API field name and intentionally shadows the builtin in this signature.
        id: typing.Union[MetaOapg.properties.id, str, schemas.Unset] = schemas.unset,
        name: typing.Union[MetaOapg.properties.name, str, schemas.Unset] = schemas.unset,
        country: typing.Union[MetaOapg.properties.country, str, schemas.Unset] = schemas.unset,
        description: typing.Union[MetaOapg.properties.description, str, schemas.Unset] = schemas.unset,
        logoUrl: typing.Union[MetaOapg.properties.logoUrl, str, schemas.Unset] = schemas.unset,
        _configuration: typing.Optional[schemas.Configuration] = None,
        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
    ) -> 'LocationDto':
        # All construction is delegated to DictSchema; this wrapper only fixes the keyword set.
        return super().__new__(
            cls,
            *args,
            id=id,
            name=name,
            country=country,
            description=description,
            logoUrl=logoUrl,
            _configuration=_configuration,
            **kwargs,
        )
| [
"long.peng@elestyle.jp"
] | long.peng@elestyle.jp |
79a9ca1ff80bd0b65d9154b7ec175969fbb27012 | bcabd9b183bc011e1ccf7e367fbed0dcaa03eee6 | /1 PYTHON/1 EDUREKA/EDUREKA OLD/15 Flow Control.py | 5477c0e3805419ff7305edd254f4bc304944ea76 | [] | no_license | rajeshsvv/Lenovo_Back | 287fe4da2c696aa248ec57a4c45c4f234f6ca9ed | 7e49e38aaf934c65f9992a78404d2b81a4cd0204 | refs/heads/master | 2022-12-23T16:44:41.488128 | 2019-08-29T10:00:10 | 2019-08-29T10:00:10 | 204,859,914 | 0 | 1 | null | 2022-12-10T11:50:31 | 2019-08-28T06:05:35 | Python | UTF-8 | Python | false | false | 543 | py | '''
marks=20
if(marks>80) and (marks<=100):
print("GRADE A")
elif(marks>60) and (marks<=80):
print("GRADE B")
elif(marks>40) and (marks<=60):
print("GRADE C")
elif marks>=20 and marks<=40:
print("GRADE D")
else:
print("Please Enter Marks in between range 0 to 100")
'''
# While loop: add the numbers from the given number down to one,
# e.g. for 5 the result is 5+4+3+2+1 = 15.
num = int(input("Enter the value of n="))
if num <= 0:
    print("Enter a valid value")
else:
    # Accumulate into `total` rather than `sum`, so the built-in sum() is not shadowed.
    total = 0
    while num > 0:
        total += num
        num -= 1
    print(total)
| [
"rajeshsvv01@gmail.com"
] | rajeshsvv01@gmail.com |
c27112ab8dbb9ade9e181d280ddd8534a8a5ca75 | 78d0d278d72afb500fc68ee3a45c39b80ccf193c | /utils/logging.py | 795095f1c4e80a53d50aa23ec714fb3b77caa422 | [] | no_license | mzq308734881/P2Net.pytorch | b56e501e2257b8b017f96cc70f8ba724cb8ee83f | 5e40745d0cba647dc02a9f6ea114e326e26e3a0a | refs/heads/master | 2022-12-25T04:28:26.198875 | 2020-10-07T16:42:35 | 2020-10-07T16:42:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | from __future__ import absolute_import
import os
import errno
import sys
class Logger(object):
    """Write-through logger: every write is mirrored to stdout and, when a
    file path was supplied, to that file (its parent directory is created
    on demand). Usable as a context manager; closing also closes stdout,
    exactly like the historical behavior.
    """
    def __init__(self, fpath=None):
        self.console = sys.stdout
        self.file = None
        if fpath is not None:
            try:
                os.makedirs(os.path.dirname(fpath))
            except OSError as err:
                # The directory already existing is fine; anything else is real.
                if err.errno != errno.EEXIST:
                    raise
            self.file = open(fpath, 'w')
    def __del__(self):
        self.close()
    def __enter__(self):
        pass
    def __exit__(self, *args):
        self.close()
    def write(self, msg):
        # Mirror the message to the console first, then to the file (if any).
        for stream in (self.console, self.file):
            if stream is not None:
                stream.write(msg)
    def flush(self):
        self.console.flush()
        if self.file is None:
            return
        self.file.flush()
        # Force the bytes to disk so the log survives a crash.
        os.fsync(self.file.fileno())
    def close(self):
        self.console.close()
        if self.file is not None:
            self.file.close()
| [
"jyguo@pku.edu.cn"
] | jyguo@pku.edu.cn |
421316f64e6767eb54d7e1b4351d921a48b5a002 | e57d7785276053332c633b57f6925c90ad660580 | /sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/aio/operations/_protection_policy_operation_results_operations.py | 7f2085e9859ca31d51d469884b033b61357d8c8b | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | adriananeci/azure-sdk-for-python | 0d560308497616a563b6afecbb494a88535da4c5 | b2bdfe659210998d6d479e73b133b6c51eb2c009 | refs/heads/main | 2023-08-18T11:12:21.271042 | 2021-09-10T18:48:44 | 2021-09-10T18:48:44 | 405,684,423 | 1 | 0 | MIT | 2021-09-12T15:51:51 | 2021-09-12T15:51:50 | null | UTF-8 | Python | false | false | 5,236 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
# Signature of the optional `cls` response-shaping callback popped from kwargs in
# the operations below: (pipeline_response, deserialized, response_headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated operations class (see file header) — regeneration will
# overwrite manual edits.
class ProtectionPolicyOperationResultsOperations:
    """ProtectionPolicyOperationResultsOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.recoveryservicesbackup.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def get(
        self,
        vault_name: str,
        resource_group_name: str,
        policy_name: str,
        operation_id: str,
        **kwargs: Any
    ) -> "_models.ProtectionPolicyResource":
        """Provides the result of an operation.
        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
        present.
        :type resource_group_name: str
        :param policy_name: Backup policy name whose operation's result needs to be fetched.
        :type policy_name: str
        :param operation_id: Operation ID which represents the operation whose result needs to be
        fetched.
        :type operation_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ProtectionPolicyResource, or the result of cls(response)
        :rtype: ~azure.mgmt.recoveryservicesbackup.models.ProtectionPolicyResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Optional caller-supplied callback that reshapes the response before returning.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ProtectionPolicyResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'policyName': self._serialize.url("policy_name", policy_name, 'str'),
            'operationId': self._serialize.url("operation_id", operation_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Anything other than HTTP 200 is mapped to the richest matching exception type.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ProtectionPolicyResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupPolicies/{policyName}/operationResults/{operationId}'}  # type: ignore
| [
"noreply@github.com"
] | adriananeci.noreply@github.com |
2a5e83c5e9b26ae41173ccc7e6577802969b9966 | 3db89b4f5ada46de1534e65e1ddf38d5fb10202a | /cookiespostform/testapp/views.py | ad081fc8a6ad25b3cd0f4929773c837725a95232 | [] | no_license | abhinav375/DjangoProjects | 42a5744527a5cf170daa2af3369e5652a5e87591 | 9d7de14631f8e2b57c66b517da240e36872b11c3 | refs/heads/master | 2022-11-12T14:05:51.983610 | 2020-07-11T08:32:59 | 2020-07-11T08:32:59 | 278,821,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,389 | py | from django.shortcuts import render
from . import forms
# Create your views here.
'''COOKIES CODE'''
def index(request):
    """Render the registration form; on a valid POST, attach name/roll_no cookies
    to the response before returning it.

    NOTE(review): the response is rendered with the unbound form *before* the
    POST branch runs, so validation errors are never displayed — confirm this
    is intentional.
    """
    form=forms.studentRegustration()
    response=render(request,'testapp/wish.html',{'form':form})
    if request.method=='POST':
        print('inside post')
        # Re-bind the form to the submitted data for validation.
        form=forms.studentRegustration(request.POST)
        if(form.is_valid()):
            print("inside 2nd post")
            #name=request.POST['name']
            #roll_no=request.POST['roll_No']
            name=form.cleaned_data['name']
            roll_no=form.cleaned_data['roll_No']
            # Persist the submitted values client-side as cookies.
            response.set_cookie('name',name)
            response.set_cookie('roll_no',roll_no)
            print(name,roll_no)
    return response
'''SESSION CODE'''
'''def index(request):
form=forms.studentRegustration()
if request.method=='POST':
print('inside post')
form=forms.studentRegustration(request.POST)
if(form.is_valid()):
print("inside 2nd post")
#name=request.POST['name']
#roll_no=request.POST['roll_No']
name=form.cleaned_data['name']
roll_no=form.cleaned_data['roll_No']
request.session['name']=name
request.session['roll_no']=roll_no
print(name,roll_no)
return render(request,'testapp/wish.html',{'form':form})'''
def show(request):
return render(request,'testapp/index.html') | [
"you@example.com"
] | you@example.com |
eaf5393d03c4bfada9933355ce396fc33623620d | 584e9c42e6240b9facc866703a6f26b06773df94 | /Oreilly/index_power.py | 16e65372a6b847c25ea55fe8a59c26a35c99ad56 | [] | no_license | anton-dovnar/checkio | 48fbaf84c244b0fca7bed5cf7f34179cf850adf9 | 10aed757ec36f182871a03ed8c9e73319cc8824a | refs/heads/master | 2023-03-24T16:23:39.524060 | 2021-03-12T13:07:04 | 2021-03-12T13:07:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,487 | py | #!/home/fode4cun/.local/share/virtualenvs/checkio-ufRDicT7/bin/checkio --domain=py run index-power
# You are given an array with positive numbers and a number N. You should find the N-th power of the element in the array with the index N. If N is outside of the array, then return -1. Don't forget that the first element has the index 0.
#
# Let's look at a few examples:
# - array = [1, 2, 3, 4] and N = 2, then the result is 32== 9;
# - array = [1, 2, 3] and N = 3, but N is outside of the array, so the result is -1.
#
# Input:Two arguments. An array as a list of integers and a number as a integer.
#
# Output:The result as an integer.
#
# Precondition:0 < len(array) ≤ 10
# 0 ≤ N
# all(0 ≤ x ≤ 100 for x in array)
#
#
#
# END_DESC
import math
def index_power(array: list, n: int) -> int:
    """Return array[n] ** n, or -1 when index n is out of range.

    Uses exact integer exponentiation: the previous int(math.pow(...)) worked
    in floating point and silently lost precision once the result exceeded
    2**53 (e.g. 99**9), which the stated preconditions (values up to 100,
    up to 10 elements) allow. The explicit bounds check also rejects
    negative n, matching the precondition 0 <= N.
    """
    if 0 <= n < len(array):
        return array[n] ** n
    return -1
if __name__ == '__main__':
    # Quick self-check when run as a script (the checkio runner does its own testing).
    print('Example:')
    print(index_power([1, 2, 3, 4], 2))
    #These "asserts" using only for self-checking and not necessary for auto-testing
    assert index_power([1, 2, 3, 4], 2) == 9, "Square"
    assert index_power([1, 3, 10, 100], 3) == 1000000, "Cube"
    assert index_power([0, 1], 0) == 1, "Zero power"
    assert index_power([1, 2], 3) == -1, "IndexError"
    print("Coding complete? Click 'Check' to review your tests and earn cool rewards!")
| [
"fode4cun@gmail.com"
] | fode4cun@gmail.com |
567a743c25df140f8aaac2ceef3ce8511d071da6 | b1e7286dbd522cd5eecd7b686d446f1645681f66 | /src/backend/datasets/migrations/0015_proj_exp_run_unique.py | 35a96b742c3023906426a34aa931b838743710a2 | [] | no_license | glormph/kantele | 69f9c040a885b7b4ff412db945fafb7b4cfc7c07 | 6b8dfea90968ad34db95e2c0e37289288330f6d3 | refs/heads/master | 2023-08-07T13:51:32.595737 | 2023-07-20T09:15:02 | 2023-07-20T09:15:02 | 7,146,448 | 3 | 2 | null | 2023-09-13T08:20:15 | 2012-12-13T10:25:42 | Python | UTF-8 | Python | false | false | 696 | py | from django.db import migrations, models
class Migration(migrations.Migration):
    """Enforce uniqueness: project names globally, experiment names per project,
    and run names per experiment (runs after duplicates were merged in 0014)."""
    dependencies = [
        ('datasets', '0014_merge_duplicate_projects'),
    ]
    operations = [
        # Project names become globally unique.
        migrations.AlterField(
            model_name='project',
            name='name',
            field=models.TextField(unique=True),
        ),
        # An experiment name may appear only once within a project.
        migrations.AddConstraint(
            model_name='experiment',
            constraint=models.UniqueConstraint(fields=('name', 'project'), name='uni_expproj'),
        ),
        # A run name may appear only once within an experiment.
        migrations.AddConstraint(
            model_name='runname',
            constraint=models.UniqueConstraint(fields=('name', 'experiment'), name='uni_runexp'),
        ),
    ]
| [
"jorrit.boekel@scilifelab.se"
] | jorrit.boekel@scilifelab.se |
7b5b2cfee06f7fc847e10c4a981a7a5955ab1d0b | 11aaeaeb55d587a950456fd1480063e1aed1d9e5 | /.history/ex45-test_20190608175151.py | f62ed598c4c6b6f8d7975013b865de111467448e | [] | no_license | Gr4cchus/Learn-Python-3-The-Hard-Way | 8ce9e68f6a91ea33ea45fe64bfff82d65422c4a8 | f5fa34db16cdd6377faa7fcf45c70f94bb4aec0d | refs/heads/master | 2020-05-17T23:18:29.483160 | 2019-06-26T18:42:52 | 2019-06-26T18:42:52 | 184,023,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py |
class Room1():
def enter():
print("You enter room 1")
class Room2():
def enter():
print("You enter room 2")
class Map():
def __init__(self, starting_room):
self.starting_room = starting_room
self.locations = {
'room1': Room1,
'room2': Room2
}
class Engine():
def __init__(self, map):
self.map = map
def play(self):
| [
"ahivent@gmail.com"
] | ahivent@gmail.com |
4ac036ed6281c7ea308ec337312bb37f199de5f4 | a3d058c6a80d4068fa4d3185ddd2dec91abc82d7 | /merge_sort.py | ec31be8a8d73eaaca1cf87b074ff8b196a0dcbc5 | [] | no_license | guard1000/Everyday-coding | d6f496654b635738a4284270f6c5d285116a760e | 7755f99cdb512d623392af82282bf17b47cb77f2 | refs/heads/master | 2021-08-18T22:26:04.322162 | 2021-07-21T14:53:28 | 2021-07-21T14:53:28 | 161,440,626 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | def mergeSort(alist):
    if len(alist)>1:
        mid = len(alist)//2 # split point: the midpoint index
        lefthalf = alist[:mid] # divide via list slicing
        righthalf = alist[mid:]
        mergeSort(lefthalf) # recursive call on each half
        mergeSort(righthalf)
        i=0
        j=0
        k=0
        # Merge phase: i walks lefthalf, j walks righthalf, k writes back into alist.
        while i < len(lefthalf) and j < len(righthalf): #
            if lefthalf[i] < righthalf[j]:
                alist[k]=lefthalf[i]
                i=i+1
            else:
                alist[k]=righthalf[j]
                j=j+1
            k=k+1
        # Copy whatever remains of the left half...
        while i < len(lefthalf):
            alist[k]=lefthalf[i]
            i=i+1
            k=k+1
        # ...and whatever remains of the right half.
        while j < len(righthalf):
            alist[k]=righthalf[j]
            j=j+1
            k=k+1
alist=[]
with open('data.txt') as f: # read the input data file
    lines = f.read().split() # whitespace-separated numbers from data.txt
    for line in lines: # append each parsed integer to the list
        alist.append(int(line))
print('머지소팅 전')  # "before merge sort"
print(alist)
mergeSort(alist)
print('머지소팅 후')  # "after merge sort"
print(alist) | [
"cjsdnr885@naver.com"
] | cjsdnr885@naver.com |
0c66d8a9ced9f7ec361a037765805fa6792abdb6 | 072e68a2edddd98e3d534207169e9bbd0dda86d1 | /math_/math_floor.py | 4765869282478340137655799f4efff8588d714f | [] | no_license | raul-jr3/PyTuts | 22f6171476f707acdb5beb80fc7974202c765717 | 546d92676ce5790a5865349ff11adc35b245bb09 | refs/heads/master | 2020-03-07T09:34:18.240237 | 2018-03-30T12:43:34 | 2018-03-30T12:43:34 | 127,411,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py |
import math
# FLOOR
# the floor function returns the largest integer which is less than or equal to the
# number on which floor is applied
a = 23.66
# so when the floor is applied on 23.66 the greatest value which is <= 23.66 is 23.0
result = math.floor(a)
# so this returns 23.0
print(result)
# and if we have a negative value
b = -23.77
# then I apply floor on it
output = math.floor(b)
# the output will be -24.00 because -24.00 < -23.77
print(output)
| [
"rahul.srivatsav1995@gmail.com"
] | rahul.srivatsav1995@gmail.com |
aff6565707f7b4e3ab9e9b6d44ff5ca4a8df9e0f | e35fd52fe4367320024a26f2ee357755b5d5f4bd | /leetcode/problems/1227.airplane-seat-assignment-probability.py | 5ea53e78f549aa1ca11296ef89c68e5f4bb6a8f1 | [] | no_license | liseyko/CtCI | a451967b0a0ce108c491d30b81e88d20ad84d2cd | c27f19fac14b4acef8c631ad5569e1a5c29e9e1f | refs/heads/master | 2020-03-21T14:28:47.621481 | 2019-11-12T22:59:07 | 2019-11-12T22:59:07 | 138,658,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | #
# @lc app=leetcode id=1227 lang=python3
#
# [1227] Airplane Seat Assignment Probability
#
# https://leetcode.com/problems/airplane-seat-assignment-probability/description/
#
# algorithms
# Medium (61.16%)
# Total Accepted: 2.6K
# Total Submissions: 4.2K
# Testcase Example: '1'
#
# n passengers board an airplane with exactly n seats. The first passenger has
# lost the ticket and picks a seat randomly. But after that, the rest of
# passengers will:
#
#
# Take their own seat if it is still available,
# Pick other seats randomly when they find their seat occupied
#
#
# What is the probability that the n-th person can get his own seat?
#
#
# Example 1:
#
#
# Input: n = 1
# Output: 1.00000
# Explanation: The first person can only get the first seat.
#
# Example 2:
#
#
# Input: n = 2
# Output: 0.50000
# Explanation: The second person has a probability of 0.5 to get the second
# seat (when first person gets the first seat).
#
#
#
# Constraints:
#
#
# 1 <= n <= 10^5
#
#
class Solution:
def nthPersonGetsNthSeat(self, n: int) -> float:
| [
"liseyko@gmail.com"
] | liseyko@gmail.com |
f0f8a062872fffa9fc34360a0ee4a31e40c80774 | e5453b6a4b84a32ccca7281d438b7a7fa1853f58 | /src/hmm/checks/huawei_hmm_mezz28_check.py | 96fe9b7fb699bad7f1d4588465e23c627a7e1565 | [
"MIT"
] | permissive | Huawei/Server_Management_Plugin_Check_MK | 88445d9da581c347c5e82cf590453c4cb2c3d53c | 88398c7c8affe0b2064f418de931d69e36afde67 | refs/heads/master | 2021-05-11T11:40:55.302518 | 2021-01-27T09:53:17 | 2021-01-27T09:53:17 | 117,641,709 | 1 | 4 | null | 2018-01-31T05:38:01 | 2018-01-16T06:30:39 | null | UTF-8 | Python | false | false | 1,244 | py | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
def inventory_huawei_mezz_health(info):
    """Discovery: always announce one 'blade 28 mezz status' service (no params)."""
    item, params = "blade 28 mezz status", None
    return [(item, params)]
def scan(oid):
    """SNMP scan: match devices where this Huawei enterprise OID reports '1'."""
    presence = oid(".1.3.6.1.4.1.2011.2.82.1.82.4.28.6.0")
    return presence == '1'
# Check_MK registration for the blade 28 mezzanine status check.
check_info["huawei_hmm_mezz28_check"] = {
    "inventory_function": inventory_huawei_mezz_health,
    # check_huawei_mezz_health is not defined in this file — presumably supplied
    # by huawei_hmm_util.include (listed under 'includes' below); confirm.
    "check_function": check_huawei_mezz_health,
    "service_description": "%s",
    'includes': ["huawei_hmm_util.include", ],
    # Base OID plus the column indices to fetch for the check.
    "snmp_info": (".1.3.6.1.4.1.2011.2.82.1.82.4.28.2008.1", ["4", "5", "2", ]),
    'snmp_scan_function': scan,
}
| [
"31431891+serverplugin@users.noreply.github.com"
] | 31431891+serverplugin@users.noreply.github.com |
62627307352f8f7f4837cc9fa705a66e90b039fa | 7950c4faf15ec1dc217391d839ddc21efd174ede | /leetcode-cn/0070.0_Climbing_Stairs.py | 2f2e3545798d3174f99fdd72378d905d9ded0543 | [] | no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py | '''
DP
执行用时:48 ms, 在所有 Python3 提交中击败了10.11% 的用户
内存消耗:14.9 MB, 在所有 Python3 提交中击败了59.27% 的用户
通过测试用例:45 / 45
'''
class Solution:
    def climbStairs(self, n: int) -> int:
        """Bottom-up DP: number of distinct ways to climb n stairs in 1- or 2-step moves."""
        prev, cur = 1, 1
        step = 1
        while step < n:
            prev, cur = cur, prev + cur
            step += 1
        return cur
'''
DFS + memo
执行用时:40 ms, 在所有 Python3 提交中击败了10.11% 的用户
内存消耗:14.9 MB, 在所有 Python3 提交中击败了71.85% 的用户
通过测试用例:45 / 45
'''
class Solution:
    def climbStairs(self, n: int) -> int:
        """Top-down memoized recursion: f(k) = f(k-1) + f(k-2), f(1)=1, f(2)=2.

        The memo is an unbounded cache on a local helper instead of
        `functools.lru_cache` on the bound method: caching an instance method
        keys every entry on `self` and keeps each Solution instance alive for
        the lifetime of the cache (ruff B019), and the old maxsize of 50 could
        evict entries still needed by the recursion.
        """
        @functools.lru_cache(maxsize=None)
        def ways(k: int) -> int:
            return ways(k - 1) + ways(k - 2) if k > 2 else k
        return ways(n)
'''
DFS
执行用时:40 ms, 在所有 Python3 提交中击败了32.24% 的用户
内存消耗:15 MB, 在所有 Python3 提交中击败了9.49% 的用户
通过测试用例:45 / 45
'''
class Solution:
    @cache
    def climbStairs(self, n: int) -> int:
        """Memoized DFS: the answer for n is the sum of the answers for n-1 and n-2."""
        if n in (1, 2):
            return n
        return self.climbStairs(n - 2) + self.climbStairs(n - 1)
| [
"laoxing201314@outlook.com"
] | laoxing201314@outlook.com |
7a4d0807aba6f1a6a45123141383320c7e47457b | 76e6d4f93078327fef8672133fc75a6f12abc240 | /ABC115/B.py | a7a98c56a06aeac07943f556983967b981e0dbbc | [] | no_license | adusa1019/atcoder | 1e8f33253f6f80a91d069b2f3b568ce7a2964940 | f7dbdfc021425160a072f4ce4e324953a376133a | refs/heads/master | 2021-08-08T04:41:36.098678 | 2021-02-01T07:34:34 | 2021-02-01T07:34:34 | 89,038,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | def solve(string):
    # Input is "N\nv1\n...\nvN": parse every token, then drop the leading count.
    ins = list(map(int, string.split("\n")))[1:]
    ins.sort()
    # Halve (floor) the largest value, then total everything.
    ins[-1] //= 2
    return str(sum(ins))
if __name__ == '__main__':
    # Read N, then N values, and rebuild the raw input block that solve() expects.
    n = int(input())
    ins = [input() for _ in range(n)]
    print(solve("{}\n{}".format(n, "\n".join(ins))))
| [
"symphony20030829@yahoo.co.jp"
] | symphony20030829@yahoo.co.jp |
540f50617caf6c1021261198e47bb8183bc6dc47 | 9879c8a1f1ac5884d9220e51c6256bb651fc800e | /pyot/utils/locks.py | 685040cb08256a4e767a743a6490ad93cd67e174 | [
"MIT"
] | permissive | rasmusdoh/Pyot | d260ee37b59cca026c9edd3e9be85f2197604df6 | de5065c55f171bb39691ddc76da99c5f16da94d9 | refs/heads/master | 2023-08-05T15:56:17.726457 | 2021-09-15T20:59:39 | 2021-09-15T20:59:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,887 | py | from threading import Lock
import asyncio
try: # Delay exception if aioredlock is not installed
from aioredlock import Aioredlock
except ModuleNotFoundError as e:
Aioredlock = e
from .eventloop import LoopSensitiveManager
class SealLock:
    '''
    An awaitable wrapper around threading.Lock: acquisition is delegated to the
    default executor, so the running event loop is never blocked while waiting.
    '''
    def __init__(self):
        self._lock = Lock()
    async def _acquire_in_executor(self):
        # Block a worker thread, not the loop, until the lock is ours.
        await asyncio.get_event_loop().run_in_executor(None, self._lock.acquire)
    async def __aenter__(self, *args):
        await self._acquire_in_executor()
        return self
    async def __aexit__(self, *args):
        self._lock.release()
    async def acquire(self):
        '''Acquire the lock without blocking the event loop; returns self.'''
        await self._acquire_in_executor()
        return self
    def release(self):
        '''Release the lock; synchronous on purpose so cleanup is easy (e.g. registering `atexit`).'''
        self._lock.release()
class RedisLock:
    '''
    An asynchronous redis Lock. The event loop won't be blocked when acquiring the lock.
    '''
    def __init__(self, host: str, port: int, db: int, retry_count: int, retry_delay_min: float, retry_delay_max: float):
        # Fail fast if aioredlock could not be imported: the module-level
        # try/except stored the ModuleNotFoundError in `Aioredlock`.
        if isinstance(Aioredlock, Exception):
            raise Aioredlock
        self._connections = [f"redis://{host}:{port}/{db}"]
        self.retry_count = retry_count
        self.retry_delay_min = retry_delay_min
        self.retry_delay_max = retry_delay_max
        # Lock managers are created lazily per event loop via the factory below.
        self._lock_managers = LoopSensitiveManager(self._new_lock_manager)
    def _new_lock_manager(self):
        # Factory: a lock manager bound to the configured Redis instance and retry policy.
        return Aioredlock(self._connections, self.retry_count, self.retry_delay_min, self.retry_delay_max)
    async def __call__(self, name: str, timeout: int = 10):
        '''Acquire the distributed lock `name`; it auto-expires after `timeout` seconds.'''
        return await (await self._lock_managers.get()).lock(name, lock_timeout=timeout)
| [
"paaksingtech@gmail.com"
] | paaksingtech@gmail.com |
2eccd7e9659193d54269e649310e0f205b2c5d00 | 777b5c266360b29b6d4af916726abd5d364b74a1 | /mypy_stubs/django/core/files/uploadhandler.pyi | 3950a128c4d65f66db341a6e76f3740782aca4ee | [] | no_license | uryyyyyyy/django-graphql | 44d08afc3e44514270d1d5c183caa9d1c1cf3f88 | f3d6513d2325a8e675e47500cc71d8ef56c01537 | refs/heads/master | 2021-06-10T11:11:45.110271 | 2019-02-28T07:39:54 | 2019-02-28T07:39:54 | 172,325,424 | 0 | 0 | null | 2021-04-20T17:56:57 | 2019-02-24T10:44:31 | Python | UTF-8 | Python | false | false | 1,972 | pyi | # Stubs for django.core.files.uploadhandler (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any, Optional
class UploadFileException(Exception): ...  # Root of the upload-control exception hierarchy below.
class StopUpload(UploadFileException):
    # presumably signals whether the connection should be reset rather than
    # drained — confirm against the Django implementation this stub mirrors.
    connection_reset: Any = ...
    def __init__(self, connection_reset: bool = ...) -> None: ...
class SkipFile(UploadFileException): ...  # Marker exception; carries no extra state.
class StopFutureHandlers(UploadFileException): ...  # Marker exception; carries no extra state.
class FileUploadHandler:
    # Generated stub: attribute and method signatures only, no behavior.
    chunk_size: Any = ...
    file_name: Any = ...
    content_type: Any = ...
    content_length: Any = ...
    charset: Any = ...
    content_type_extra: Any = ...
    request: Any = ...
    def __init__(self, request: Optional[Any] = ...) -> None: ...
    def handle_raw_input(self, input_data: Any, META: Any, content_length: Any, boundary: Any, encoding: Optional[Any] = ...) -> None: ...
    # NOTE: this attribute sits between methods exactly as stubgen emitted it.
    field_name: Any = ...
    def new_file(self, field_name: Any, file_name: Any, content_type: Any, content_length: Any, charset: Optional[Any] = ..., content_type_extra: Optional[Any] = ...) -> None: ...
    def receive_data_chunk(self, raw_data: Any, start: Any) -> None: ...
    def file_complete(self, file_size: Any) -> None: ...
    def upload_complete(self) -> None: ...
class TemporaryFileUploadHandler(FileUploadHandler):
    # Streams the upload to a temporary file on disk.
    file: Any = ...
    def new_file(self, *args: Any, **kwargs: Any) -> None: ...
    def receive_data_chunk(self, raw_data: Any, start: Any) -> None: ...
    def file_complete(self, file_size: Any): ...
class MemoryFileUploadHandler(FileUploadHandler):
    # Buffers small uploads entirely in memory; `activated` gates whether this
    # handler takes the file at all.
    activated: Any = ...
    def handle_raw_input(self, input_data: Any, META: Any, content_length: Any, boundary: Any, encoding: Optional[Any] = ...) -> None: ...
    file: Any = ...
    def new_file(self, *args: Any, **kwargs: Any) -> None: ...
    def receive_data_chunk(self, raw_data: Any, start: Any): ...
    def file_complete(self, file_size: Any): ...
def load_handler(path: Any, *args: Any, **kwargs: Any): ...  # Import a handler class from a dotted path.
| [
"koki@anymindgroup.com"
] | koki@anymindgroup.com |
7db42065296d3a546b8f9ca6e08852751979f507 | b2d3bd39b2de8bcc3b0f05f4800c2fabf83d3c6a | /examples/pwr_run/checkpointing/final/high_overhead/job74.py | 220089b171b01acea0550fae22e4fd603da2fcd0 | [
"MIT"
] | permissive | boringlee24/keras_old | 3bf7e3ef455dd4262e41248f13c04c071039270e | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | refs/heads/master | 2021-11-21T03:03:13.656700 | 2021-11-11T21:57:54 | 2021-11-11T21:57:54 | 198,494,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,348 | py | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.nasnet import NASNetMobile
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
# Command-line interface: test-case name, resume flag, GPU index, and the
# scheduler host this job reports progress to.
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
# Pin this process to the requested GPU.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.001
args_model = 'mnasnet'
# Wall-clock start time of the epoch currently running (0 = none started yet).
epoch_begin_time = 0
# Job name derived from this script's file name, e.g. "job74".
job_name = sys.argv[0].split('.')[0]
# Glob pattern matching this job's checkpoint files.
save_files = '/scratch/li.baol/checkpoint_final4/' + job_name + '*'
total_epochs = 20
starting_epoch = 0
# first step is to update the PID: report our PID to the scheduler node.
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
    save_file = glob.glob(save_files)[0]
    # epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
    # Checkpoint is saved as "<job>_<epoch>.h5"; the trailing "_<epoch>" gives
    # the epoch to resume from. Assumes the fixed /scratch/... path depth
    # (index 4) — TODO confirm if the checkpoint directory ever changes.
    starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
    # Per-pixel mean computed over the training set only, applied to both splits.
    x_train_mean = np.mean(x_train, axis=0)
    x_train -= x_train_mean
    x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
    # Resuming: reload the checkpointed model. The b_end/c_end messages bracket
    # the restore overhead for the scheduler (the sleep simulates extra overhead).
    print('resume from checkpoint')
    time.sleep(100)
    message = job_name + ' b_end'
    send_signal.send(args.node, 10002, message)
    model = keras.models.load_model(save_file)
    message = job_name + ' c_end'
    send_signal.send(args.node, 10002, message)
else:
    print('train from start')
    model = models.Sequential()
    # NASNetMobile backbone trained from scratch (weights=None) on 32x32 CIFAR input.
    base_model = NASNetMobile(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    #base_model.summary()
    #pdb.set_trace()
    model.add(base_model)
    model.add(layers.Flatten())
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(128, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(64, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    model.add(layers.Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=args_lr),
                  metrics=['accuracy'])
    #model.summary()
    print(model_type)
    #pdb.set_trace()
# Epoch currently in progress; updated by the PrintEpoch callback and read by
# the SIGTERM handler when naming the checkpoint file.
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGTERM handler: report wasted epoch time, checkpoint the model, and exit."""
    # first record the wasted epoch time (time spent in the unfinished epoch)
    global epoch_begin_time
    if epoch_begin_time == 0:
        epoch_waste_time = 0
    else:
        epoch_waste_time = int(time.time() - epoch_begin_time)
    message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
    if epoch_waste_time > 0:
        send_signal.send(args.node, 10002, message)
    print('checkpointing the model triggered by kill -15 signal')
    # delete whatever checkpoint that already exists, so only one survives
    for f in glob.glob(save_files):
        os.remove(f)
    # Checkpoint name encodes the epoch to resume from (see the --resume path).
    model.save('/scratch/li.baol/checkpoint_final4/' + job_name + '_' + str(current_epoch) + '.h5')
    print ('(SIGTERM) terminating the process')
    message = job_name + ' checkpoint'
    send_signal.send(args.node, 10002, message)
    sys.exit()
# Install the checkpoint-and-exit handler for kill -15.
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
# Timestamp of the first trained epoch; set in PrintEpoch.on_epoch_begin.
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
    """Keras callback that reports per-epoch progress to the scheduler node."""
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch, first_epoch_start
        #remaining_epochs = epochs - epoch
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
        global epoch_begin_time
        epoch_begin_time = time.time()
        if epoch == starting_epoch and args.resume:
            first_epoch_start = time.time()
            # d_end marks the end of the restore phase on a resumed run.
            message = job_name + ' d_end'
            send_signal.send(args.node, 10002, message)
        elif epoch == starting_epoch:
            first_epoch_start = time.time()
        if epoch == starting_epoch:
            # send signal to indicate checkpoint is qualified
            message = job_name + ' ckpt_qual'
            send_signal.send(args.node, 10002, message)
    def on_epoch_end(self, epoch, logs=None):
        if epoch == starting_epoch:
            # Duration of the first epoch after (re)start.
            first_epoch_time = int(time.time() - first_epoch_start)
            message = job_name + ' 1st_epoch ' + str(first_epoch_time)
            send_signal.send(args.node, 10002, message)
        # Completion fraction relative to this run's half of total_epochs.
        progress = round((epoch+1) / round(total_epochs/2), 2)
        message = job_name + ' completion ' + str(progress)
        send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training (this job only trains half of total_epochs).
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=round(total_epochs/2),
          validation_data=(x_test, y_test),
          shuffle=True,
          callbacks=callbacks,
          initial_epoch=starting_epoch,
          verbose=1
          )
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| [
"baolin.li1994@gmail.com"
] | baolin.li1994@gmail.com |
dae1a76cf051ae977abd68756071be26c70941dd | 0db19410e9751790af8ce4a0a9332293e379c02f | /configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256.py | 5a83e7a97b9478031f7ca4dcc4dccba0350d432d | [
"Apache-2.0"
] | permissive | open-mmlab/mmpose | 2c9986521d35eee35d822fb255e8e68486026d94 | 537bd8e543ab463fb55120d5caaa1ae22d6aaf06 | refs/heads/main | 2023-08-30T19:44:21.349410 | 2023-07-04T13:18:22 | 2023-07-04T13:18:22 | 278,003,645 | 4,037 | 1,171 | Apache-2.0 | 2023-09-14T09:44:55 | 2020-07-08T06:02:55 | Python | UTF-8 | Python | false | false | 4,070 | py | _base_ = ['../../../_base_/default_runtime.py']
# runtime
train_cfg = dict(max_epochs=300, val_interval=10)
# optimizer
optim_wrapper = dict(optimizer=dict(
    type='AdamW',
    lr=5e-4,
))
# learning policy
# NOTE(review): train_cfg.max_epochs is 300 but the MultiStepLR schedule ends
# at epoch 210 (milestones 170/200), so the LR stays constant for epochs
# 210-300 — confirm whether max_epochs was meant to be 210.
param_scheduler = [
    dict(
        type='LinearLR', begin=0, end=500, start_factor=0.001,
        by_epoch=False), # warm-up
    dict(
        type='MultiStepLR',
        begin=0,
        end=210,
        milestones=[170, 200],
        gamma=0.1,
        by_epoch=True)
]
# automatically scaling LR based on the actual training batch size
auto_scale_lr = dict(base_batch_size=512)
# hooks
default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater'))
# codec settings
codec = dict(
    type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2)
# model settings
model = dict(
    type='TopdownPoseEstimator',
    data_preprocessor=dict(
        type='PoseDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True),
    backbone=dict(
        type='HRNet',
        in_channels=3,
        extra=dict(
            stage1=dict(
                num_modules=1,
                num_branches=1,
                block='BOTTLENECK',
                num_blocks=(4, ),
                num_channels=(64, )),
            stage2=dict(
                num_modules=1,
                num_branches=2,
                block='BASIC',
                num_blocks=(4, 4),
                num_channels=(32, 64)),
            stage3=dict(
                num_modules=4,
                num_branches=3,
                block='BASIC',
                num_blocks=(4, 4, 4),
                num_channels=(32, 64, 128)),
            stage4=dict(
                num_modules=3,
                num_branches=4,
                block='BASIC',
                num_blocks=(4, 4, 4, 4),
                num_channels=(32, 64, 128, 256))),
        init_cfg=dict(
            type='Pretrained',
            checkpoint='https://download.openmmlab.com/mmpose/'
            'pretrain_models/hrnet_w32-36af842e.pth'),
    ),
    head=dict(
        type='HeatmapHead',
        in_channels=32,
        # One output channel per keypoint of this dataset's skeleton.
        out_channels=23,
        deconv_out_channels=None,
        loss=dict(type='KeypointMSELoss', use_target_weight=True),
        decoder=codec),
    test_cfg=dict(
        flip_test=True,
        flip_mode='heatmap',
        shift_heatmap=True,
    ))
# base dataset settings
dataset_type = 'AnimalKingdomDataset'
data_mode = 'topdown'
data_root = 'data/ak/'
# pipelines
train_pipeline = [
    dict(type='LoadImage'),
    dict(type='GetBBoxCenterScale'),
    dict(type='RandomFlip', direction='horizontal'),
    dict(type='RandomHalfBody'),
    dict(type='RandomBBoxTransform'),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='GenerateTarget', encoder=codec),
    dict(type='PackPoseInputs')
]
val_pipeline = [
    dict(type='LoadImage'),
    dict(type='GetBBoxCenterScale'),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='PackPoseInputs')
]
# data loaders
train_dataloader = dict(
    batch_size=32,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_mode=data_mode,
        ann_file='annotations/ak_P3_amphibian/train.json',
        data_prefix=dict(img='images/'),
        pipeline=train_pipeline,
    ))
val_dataloader = dict(
    batch_size=24,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_mode=data_mode,
        ann_file='annotations/ak_P3_amphibian/test.json',
        data_prefix=dict(img='images/'),
        test_mode=True,
        pipeline=val_pipeline,
    ))
test_dataloader = val_dataloader
# evaluators
val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')]
test_evaluator = val_evaluator
| [
"noreply@github.com"
] | open-mmlab.noreply@github.com |
385fe72cda2198ecc51b116e6215cc8c0d8e7955 | 161dcb4b1f3939231728e91a8129a2571842d23a | /unit_09/4.py | 8932eb0bf69d1f4f441ffe3adeef6786a7aeed3f | [] | no_license | bm1120836/21-python | 3162896e1b9e41d57c4249ea5f3bcaf06eef0361 | 8924f9b53e68b08f9203f48b215ea5b3a420d075 | refs/heads/master | 2023-05-03T16:11:42.864607 | 2015-10-01T13:26:29 | 2015-10-01T13:26:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | filename = 'examples/portfolio.csv'
# Demonstrates file-object attributes, seek/tell, and line iteration.
a_file = open(filename)
a_str = a_file.read()
print(a_str)
# File-object metadata.
print(a_file.name)
print(a_file.encoding)
print(a_file.mode)
print('file seek')
print(a_file.seek(0))
print('file read(16)')
print(a_file.read(16))
print('file read(1)')
print(a_file.read(1))
print('file tell')
print(a_file.tell())
print('file read(1)')
print(a_file.read(1))
print('file tell')
print(a_file.tell())
# Fix: this handle was previously leaked; close it once reading is done.
a_file.close()
line_number = 1
# The `with` block closes this second file automatically.
with open('examples/favorite-people.txt', encoding='utf-8') as a_file:
    for a_line in a_file:
        print('{:>4} {}'.format(line_number, a_line.rstrip()))
        line_number += 1
| [
"janusnic@gmail.com"
] | janusnic@gmail.com |
a0c52d2269793b3e8ea7cec09cd1a740d775da41 | a80874300e561174068bf510608465bb318a35f2 | /guhaisong/edu_information/edu_information/spiders/news_eastday_com_gd2008_world_62.py | 0940c07215ded42feab0cfe34bd4e13ab71c2176 | [] | no_license | lemonbiz/guhaisong | effa8af4b679511e4fa8017d71fe26ab2ce51392 | 029890f8e3c6954efdefb184fa077f2ce646d1df | refs/heads/master | 2022-12-13T08:21:37.911535 | 2020-09-15T16:15:10 | 2020-09-15T16:15:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,061 | py | # -*- coding: utf-8 -*-
import datetime
import scrapy
from urllib.parse import urljoin
from edu_information.commom.commom_method import summay_slice,title_slice,keyman_slice,writer_slice,news_source_slice,requests_detail_page
import re,time
from edu_information.commom.custom_settings import *
from edu_information.commom.bloomfilter import BloomFilter,BL
from edu_information.commom.filter import contentfilter
from scrapy.selector import Selector
from ..items import EduInformationItem
class XueqianSpider(scrapy.Spider):
    """Crawls eastday.com world-news list pages and yields article items."""
    name = "news_eastday_com_gd2008_world_62"
    allowed_domains = ["news.eastday.com"]
    start_urls = ["http://news.eastday.com/gd2008/world/index.html","http://news.eastday.com/eastday/13news/auto/news/world/index_K32.html"]
    custom_settings = {"DOWNLOAD_DELAY": 0.2}
    class_id = 62
    num = 1
    # NOTE(review): this single item instance is a CLASS attribute shared by all
    # responses; with concurrent requests, fields from different articles can
    # overwrite each other before the item is exported — confirm intended.
    items = EduInformationItem()
    flags = True
    bf = BloomFilter()
    next_index = ""
    # NOTE(review): several header names below contain stray spaces
    # ("Accept - Encoding") and are therefore not valid HTTP header names —
    # confirm whether they were meant to be "Accept-Encoding" etc.
    header = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
        "Accept - Encoding": "gzip, deflate",
        "Accept - Language": "zh-CN,zh;q=0.9",
        "Cache - Control": "no - cache",
        # "Connection": "keep - alive",
        "Host": "news.eastday.com",
        "Pragma": "no - cache",
        "Referer": "http://news.eastday.com",
        "Upgrade - Insecure - Requests": 1,
        "User - Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36",
    }
    def parse(self, response):
        """Follow every article link on a list page into parse_detail."""
        node_obj = response.xpath('''//div[@id="left"]/ul/li|//div[@class="leftsection"]/ul/li''')
        if not node_obj:
            print("error_spider",self.name)
        for detail in node_obj:
            url = detail.xpath('a/@href').extract_first()
            time_node = detail.xpath('span[@class="hui12"]/text()|span[@class="black12 fr text4"]/text()').extract_first(default="").strip()
            url = urljoin(response.url, url)
            if url == None or url =="":
                pass
            else:
                if BL:
                    # Bloom-filter de-duplication is enabled.
                    if self.bf.isContains(url): # check whether this URL was seen before
                        print('url exists!')
                    else:
                        self.bf.insert(url)
                        print("请求详情页:",url)
                        yield scrapy.Request(url,callback=self.parse_detail,headers=self.header,meta={"time_node":time_node})
                else:
                    yield scrapy.Request(url, callback=self.parse_detail, headers=self.header,
                                         meta={"time_node": time_node})
        # # # pagination (disabled)
        # next_node = response.xpath('''//div[@class="plist"]/div/a[contains(text(),"下一页")]/@href''').extract_first()
        # if next_node != None:
        #     next_page = urljoin(response.url,next_node)
        #     print("请求下页链接:",next_page)
        #     yield scrapy.Request(next_page, callback=self.parse)
    def parse_detail(self,response):
        """Extract title/keywords/summary/time/author/source/content and fill the item."""
        # title
        title = response.xpath('//div[@id="biaoti"]/text()').extract_first(default="")
        title = title.strip()
        title = title_slice(title)
        # keywords
        keyman = response.xpath('''//meta[@name="keywords"]/@content''').extract_first(default="")
        if keyman:
            keyman = keyman_slice(keyman)
        else:
            keyman = ""
        if title:
            # summary
            try:
                summary = response.xpath('//meta[@name="description"]/@content').extract_first(default="").strip()
                summary = summary.replace("东方网-东方新闻-", "")
            except Exception as e:
                summary = ""
            summary = summay_slice(summary)
            # Byline paragraph, e.g. '来源:新华社 作者:... 选稿:...' (source/author/editor).
            index_node = response.xpath('string(//div[@class="time grey12a fc lh22"]/p[last()])').extract_first()
            # publication time -> unix timestamp
            try:
                time_node = response.meta.get("time_node","")
                time_node = time_node.replace("/","-")
                news_time = datetime.datetime.strptime(str(time_node).strip(),"%Y-%m-%d %H:%M:%S")
                news_time = int(time.mktime(news_time.timetuple()))
            except Exception as e:
                print(e,"time")
                news_time = None
            # writer (text between 作者: and 选稿:)
            try:
                writer = re.search(r".*?作者:(.*?)选稿:.*?", index_node,re.S).group(1)
                writer = writer.strip()
            except Exception as e:
                print(e,"writer")
                writer = writer_defined
            writer = writer_slice(writer)
            # news source: try progressively looser patterns around 来源:
            try:
                source = re.search(r".*?来源:(.*?)作者:.*?", index_node,re.S).group(1)
                source = source.strip()
            except Exception as e:
                try:
                    source = re.search(r".*?来源:(.*?)选稿:.*?", index_node, re.S).group(1)
                    source = source.strip()
                except Exception as e:
                    try:
                        source = re.search(r".*?来源:(.*)", index_node, re.S).group(1)
                        source = source.strip()
                    except Exception as e:
                        print(e,"source")
                        source = news_source_defined
            news_source = news_source_slice(source)
            # news content: strip whitespace and HTML-entity remnants
            content = response.xpath('//div[@id="zw"]').extract_first()
            content = content.replace(" ", "")
            content = content.replace(" ", "")
            content = content.replace("    ", "")
            content = content.replace("&", "")
            content = content.replace("nbsp", "")
            content = content.replace("&nbsp", "")
            content = contentfilter(content)
            # Fields extracted from this page.
            self.items["news_keyman"] = keyman
            self.items["title"] = title
            self.items["content"] = content
            self.items['content_summary'] = summary
            self.items['click_num'] = click_num
            self.items['news_time'] = news_time
            self.items['news_source'] = news_source
            self.items['writer'] = writer
            #
            # Fixed/default fields from custom_settings.
            self.items["class_id"] = self.class_id
            self.items["user_id"] = user_id
            self.items["istop"] = istop
            self.items["ismember"] = ismember
            self.items["userfen"] = userfen
            self.items["isgood"] = isgood
            self.items["user_name"] = "admin"
            self.items["group_id"] = group_id
            self.items["plnum"] = plnum
            self.items["first_title"] = first_title
            self.items["is_qf"] = is_qf
            self.items["totaldown"] = totaldown
            self.items["have_html"] = have_html
            self.items["last_dotime"] = int(time.time())
            self.items["diggtop"] = diggtop
            self.items["stb"] = stb
            self.items["ttid"] = ttid
            self.items["ispic"] = ispic
            self.items["isurl"] = isurl
            self.items["fstb"] = fstb
            self.items["restb"] = restb
            self.items["news_tem_pid"] = news_tem_pid
            self.items["dokey"] = dokey
            self.items["closepl"] = closepl
            self.items["haveaddfen"] = haveaddfen
            self.items["infotags"] = keyman
            self.items["checked"] = checked
            self.items["keyid"] = keyid
            self.items["news_path"] = news_path
            self.items["titlepic"] = titlepic
            self.items["ftitle"] = ftitle
            #
            #
            self.items['filename'] = filename
            self.items['titlefont'] = titlefont
            self.items['title_url_z'] = title_url_z
            self.items['originalurl'] = response.url
            #
            yield self.items
| [
"xjl12322@126.com"
] | xjl12322@126.com |
b35f1a4ab850ed9cdbf6edf18e90c57f3efa4b87 | 14028bea18dcd4f89fca2306bf51dcbf6acabb44 | /apps/accounts/migrations/0032_auto_20170519_1322.py | b8a64807200ec938e87b45d1deac6b6a1e3a2b96 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | CMSgov/bluebutton-web-server | 5694c7149d9f2f6efed9a2814c928d8a7539e4cb | fb0904c0b9d77dfb00523fe6ce69b946b640441e | refs/heads/master | 2023-09-01T18:17:31.088628 | 2023-08-25T20:43:14 | 2023-08-25T20:43:14 | 50,062,960 | 30 | 33 | NOASSERTION | 2023-09-14T10:24:34 | 2016-01-20T21:52:00 | Python | UTF-8 | Python | false | false | 1,783 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-05-19 13:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django; do not hand-edit an applied migration.

    dependencies = [
        ('accounts', '0031_auto_20170517_1350'),
    ]

    operations = [
        # Adds a beneficiary/developer discriminator to invite requests.
        migrations.AddField(
            model_name='requestinvite',
            name='user_type',
            field=models.CharField(choices=[('BEN', 'Beneficiary'), ('DEV', 'Developer')], default='', max_length=3),
        ),
        migrations.AlterField(
            model_name='requestinvite',
            name='organization',
            field=models.CharField(blank=True, default='', max_length=150),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='aal',
            field=models.CharField(blank=True, choices=[('', 'Undefined'), ('1', 'AAL1'), ('2', 'AAL2'), ('3', 'AAL3')], default='1', help_text='See NIST SP 800 63 B for definitions.', max_length=1, verbose_name='Authenticator Assurance Level'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='ial',
            field=models.CharField(blank=True, choices=[('', 'Undefined'), ('1', 'IAL1'), ('2', 'IAL2'), ('3', 'IAL3')], default='', help_text='See NIST SP 800 63 A for definitions.', max_length=1, verbose_name='Identity Assurance Level'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='loa',
            field=models.CharField(blank=True, choices=[('', 'Undefined'), ('1', 'LOA-1'), ('2', 'LOA-2'), ('3', 'LOA-3'), ('4', 'LOA-4')], default='', help_text='Legacy and Deprecated. Using IAL AAL is recommended.', max_length=1, verbose_name='Level of Assurance'),
        ),
    ]
| [
"aviars@videntity.com"
] | aviars@videntity.com |
7f1badc4cc8eeef844cccd8d948e1135d2df5301 | 50cce7441685fdc5b9d1bd2b80272078f637e7c9 | /SingleTop/test/macros/ZjetSF_2.py | e7b64f1dc42fe10dff576632df9ae006aecf38c1 | [] | no_license | dnoonan08/tWAnalysisCode | 4b622177f9401007cf873a295d71b1cee4140396 | 34a3ed1cc92ff317bbebc6c54cb634d32572999a | refs/heads/master | 2021-01-19T20:18:42.857885 | 2014-12-31T04:20:47 | 2014-12-31T04:20:47 | 22,884,401 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | #!/usr/bin/env python
###SF from the Zpeak0jets region

# Upper edges of the missing-ET bins; the trailing 9999 is an open-ended
# overflow bin.
metbins = [ 10, 20, 30, 40, 50, 60, 9999]

# Original mode-0 scale factors, kept for reference. They were dead code: the
# assignment below immediately overwrote them (mode 0 forced to unity).
# sf = [[0.8938539148917144, 0.94500843235182508, 1.0442753286019268, 1.1831266443479298, 1.3636037004840249, 1.5927083462800735, 1.9687105145585893],
#       [0.82970539332200888, 0.87674221693579291, 0.96776313658464252, 1.0935264684057753, 1.2617201102857636, 1.4802939663303316, 1.8092727571765537],
#       [0.95800243646141992, 1.0132746477678571, 1.1207875206192113, 1.2727268202900843, 1.4654872906822862, 1.7051227262298156, 2.1281482719406246]]

# Per-mode scale factors, one entry per MET bin in `metbins`.
sf = [[1., 1., 1., 1., 1., 1., 1. ],
      [0.82970539332200888, 0.87674221693579291, 0.96776313658464252, 1.0935264684057753, 1.2617201102857636, 1.4802939663303316, 1.8092727571765537],
      [0.95800243646141992, 1.0132746477678571, 1.1207875206192113, 1.2727268202900843, 1.4654872906822862, 1.7051227262298156, 2.1281482719406246]]

def ZjetSF(met, mode):
    """Return the Z+jets scale factor for the given MET and mode.

    Picks the factor of the first bin whose upper edge exceeds `met`;
    returns 1.0 if `met` is beyond every bin edge.
    """
    for edge, factor in zip(metbins, sf[mode]):
        if met < edge:
            return factor
    return 1.
| [
"dnoonan@cern.ch"
] | dnoonan@cern.ch |
a14162e5b6a13393930258a65f239c5258f998cc | 68eb441faf3f9415fbcbc8330f9b01ad6933bede | /ebook/machinelearningdemo/MachineLearningDemo/python_day03/Demo02_Dataframe.py | 05cda830db9ce800c5ec102716c80ec285d7354b | [] | no_license | OrriO/jupyter_myworkspace | fb8e97865f15abe2fb3aa01985fdb4f34317f15f | a592ab92f38a1cd466c454bb36fd0002c75202a9 | refs/heads/master | 2023-06-01T02:00:36.986439 | 2021-07-08T13:44:26 | 2021-07-08T13:44:26 | 381,997,768 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | # -*- coding: utf-8 -*-
# @Time : 2018/12/5 9:18
# @Author : Z
# @Email : S
# @File : Demo02_DataFrame.py
import pandas as pd
# Build a 3x3 DataFrame with default integer row and column labels.
df1 = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(df1)
print("hello git")
# (added content)
### 23:26
| [
"guojj1@guahao.com"
] | guojj1@guahao.com |
0fd66913e4d6500654e1346a6071f0e86c16f33c | 7b870523b8e432384cff27fd50056da8c6a5b1e3 | /leetcode/083删除排序链表中的重复元素.py | f8238ba6c934f92a34234fd778fd7e26f68308a3 | [] | no_license | ShawDa/Coding | 93e198acdda528da608c62ca5b9e29bb0fb9e060 | b8ec1350e904665f1375c29a53f443ecf262d723 | refs/heads/master | 2020-03-25T09:20:08.767177 | 2019-09-01T06:25:10 | 2019-09-01T06:25:10 | 143,660,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | # -*- coding:utf-8 -*-
__author__ = 'ShawDa'
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def deleteDuplicates(self, head: 'ListNode') -> 'ListNode':
        """Collapse runs of equal values in a sorted singly linked list."""
        cur = head
        while cur is not None and cur.next is not None:
            if cur.val == cur.next.val:
                # Duplicate: splice out the successor, then re-test from here.
                cur.next = cur.next.next
            else:
                cur = cur.next
        return head
| [
"1315193735@qq.com"
] | 1315193735@qq.com |
f8e8d4ca4ac680377c3eb67bf278605933850dba | c289ba5d699ce015c7f7b7176621057bb8dfe1fa | /Python/algorithm/hanoi_tower/하노이의탑.py | bec0986a931b07758de7dcad219cfe58a5898788 | [] | no_license | sug5806/Upload | abd44985281b7aeadc39c654db099451a285a360 | c7e8ca38b04bdf2076862226cceaeff5ff3cfbc6 | refs/heads/master | 2020-05-03T00:48:53.763042 | 2019-04-08T10:00:11 | 2019-04-08T10:00:11 | 178,318,964 | 0 | 0 | null | 2019-04-01T02:03:14 | 2019-03-29T02:37:45 | Jupyter Notebook | UTF-8 | Python | false | false | 314 | py | n = 0
a = "A" # start peg
b = "B" # spare (middle) peg
c = "C" # destination peg
def hanoi(n, a, b, c):
    """Print the moves that carry n disks from peg a to peg c via spare b."""
    if n > 1:
        # First park the n-1 smaller disks on the spare peg.
        hanoi(n - 1, a, c, b)
    print("{}번째 원반을 {}로 이동".format(n, c))
    if n > 1:
        # Then bring them from the spare peg onto the destination.
        hanoi(n - 1, b, a, c)
hanoi(3, a, b, c)  # Solve the 3-disk puzzle, printing each move.
"sug5806@gmail.com"
] | sug5806@gmail.com |
26fccac5a29754168d7ea75db7369492bf854d46 | e669b3fe7da2698da4ce02e98325ce154d2aa546 | /swaps/model/wallet/withdraw.py | 4baa2eb3a0cc154ae15f13a3bd987a77ee669be8 | [
"Apache-2.0"
] | permissive | marcellinamichie291/cash_carry_leveraged_futures_arbitrageur | 0834a911fdd6c9f1462f6f2f59926f715fc51461 | 1120ebfb487ce4987fe70e6645b36e0d7ce041ec | refs/heads/main | 2023-03-16T18:35:28.730554 | 2020-12-04T07:46:13 | 2020-12-04T07:46:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,040 | py | from swaps.constant import *
class Withdraw:
    """
    The latest status for withdraws.

    :member
        id: The transfer id.
        currency: The crypto currency that was withdrawn.
        tx_hash: The on-chain transaction hash.
        amount: The number of crypto asset transferred in its minimum unit.
        address: The address the funds were sent to (the original upstream text
            said "deposit source address", a copy-paste from the Deposit model).
        address_tag: The user defined address tag.
        fee: The amount of fee taken by Huobi in this crypto's minimum unit.
        created_at: The UNIX formatted timestamp in UTC for the transfer creation.
        updated_at: The UNIX formatted timestamp in UTC for the transfer's latest update.
        state: The withdraw state of this transfer.
    """

    def __init__(self):
        self.id = 0
        # Marks this record as a withdraw when deposits/withdraws share a schema.
        self.type = DepositWithdraw.WITHDRAW
        self.currency = ""
        self.chain = ""
        self.tx_hash = ""
        self.amount = 0.0
        self.address = ""
        self.address_tag = ""
        self.fee = 0.0
        self.created_at = 0
        self.updated_at = 0
        self.state = WithdrawState.INVALID

    def print_object(self, format_data=""):
        """Pretty-print every field, prefixing each label with *format_data*."""
        from swaps.utils.print_mix_object import PrintBasic
        PrintBasic.print_basic(self.id, format_data + "ID")
        PrintBasic.print_basic(self.currency, format_data + "Currency")
        PrintBasic.print_basic(self.type, format_data + "Operator Type")
        PrintBasic.print_basic(self.chain, format_data + "Chain")
        PrintBasic.print_basic(self.tx_hash, format_data + "Trade Hash")
        PrintBasic.print_basic(self.amount, format_data + "Amount")
        PrintBasic.print_basic(self.address, format_data + "Address")
        PrintBasic.print_basic(self.address_tag, format_data + "Address Tag")
        PrintBasic.print_basic(self.fee, format_data + "Fee")
        PrintBasic.print_basic(self.state, format_data + "Withdraw State")
        PrintBasic.print_basic(self.created_at, format_data + "Create Time")
        PrintBasic.print_basic(self.updated_at, format_data + "Update Time")
"jare@coindexlabs.com"
] | jare@coindexlabs.com |
51f4f5dc31c811838a7775de5000e66f98945f1b | 1b01dec8c454337232a6cf1046412ec98269fe5d | /examples/sync_example.py | e60390356bcbce9b05c4461bb86460e1f002efa9 | [
"BSD-3-Clause"
] | permissive | lietu/shylock | d38710220306af1e4fac638b2d24df8a8fdc3801 | 5ada3cb4bf75e2395fadb19f68ceff5ff92e6a65 | refs/heads/master | 2023-08-09T06:58:47.653545 | 2023-03-05T16:27:30 | 2023-03-05T16:27:30 | 243,951,853 | 5 | 8 | NOASSERTION | 2023-07-25T21:10:23 | 2020-02-29T10:55:05 | Python | UTF-8 | Python | false | false | 1,196 | py | from time import time
from pymongo import MongoClient
from shylock import Lock, ShylockPymongoBackend, configure
from shylock.backends.pymongo import DOCUMENT_TTL
CONNECTION_STRING = "mongodb://localhost:27017"
def main():
    """Exercise shylock's pymongo backend: re-lock, release-on-error, TTL expiry."""
    print("Start")
    c = MongoClient(CONNECTION_STRING)
    configure(ShylockPymongoBackend.create(c, "shylock_test", "shylock"))
    lock_name = "test-lock"
    test_lock = Lock(lock_name)
    try:
        with Lock(lock_name):
            print("Got lock")
            print("Testing re-lock")
            # A second acquire of a held lock must fail without blocking.
            assert not test_lock.acquire(False)
            raise ValueError()
    except ValueError:
        # Leaving the `with` block (even via an exception) releases the lock.
        print("Caught exception, lock should be released")
    assert test_lock.acquire(False)
    test_lock.release()
    print(
        f"Testing automatic release, this will take a while (~{DOCUMENT_TTL}-{DOCUMENT_TTL+60}s)."
    )
    # Test automatic release
    start = time()
    with test_lock:
        lock2 = Lock(lock_name)
        try:
            # Blocks until the backend's TTL expires the still-held test_lock.
            lock2.acquire()
            released = time() - start
        finally:
            lock2.release()
    print(f"Lock automatically released after {released:.3f}s")
if __name__ == "__main__":
main()
| [
"janne.enberg@lietu.net"
] | janne.enberg@lietu.net |
efe2fd91d27dab3e24d2bc319c21afca1f2a83e6 | c309e7d19af94ebcb537f1e8655c0122dbe0cb13 | /Chapter03/01-chapter-content/argparse_positional_arguments.py | 41c0ebbfbbc78a0e5b576c61eab36691762b9e38 | [
"MIT"
] | permissive | PacktPublishing/Mastering-OpenCV-4-with-Python | 0fb82c88cb7205c7050c8db9f95a6deb3b1b3333 | 4194aea6f925a4b39114aaff8463be4d18e73aba | refs/heads/master | 2023-03-07T04:51:16.071143 | 2023-02-13T10:17:48 | 2023-02-13T10:17:48 | 151,057,527 | 375 | 226 | MIT | 2022-08-27T13:32:19 | 2018-10-01T08:27:29 | Python | UTF-8 | Python | false | false | 784 | py | """
Example to introduce argparse with a positional argument
"""
# Import the required packages
import argparse
# Usage example: python argparse_positional_arguments.py "some text"
# We first create the ArgumentParser object
# The created object 'parser' will have the necessary information
# to parse the command-line arguments into data types.
parser = argparse.ArgumentParser()
# We add a positional argument using add_argument() including a help
parser.add_argument("first_argument", help="this is the string text in connection with first_argument")
# The information about program arguments is stored in 'parser'
# Then, it is used when the parser calls parse_args().
# ArgumentParser parses arguments through the parse_args() method:
args = parser.parse_args()
# We get and print the first argument of this script:
print(args.first_argument)
| [
"fernandezvillan.alberto@gmail.com"
] | fernandezvillan.alberto@gmail.com |
21ef6f2b8e9d65d3486b9995043fba33e64b7ee2 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_205/ch38_2020_03_17_21_32_21_644302.py | 2e02b97d73747423301a36c3f31f3d55c3ad3ac1 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | def quantos_uns(x):
def quantos_uns(x):
    """Return how many times the digit 1 appears in x.

    Replaces a broken draft that (a) built the literal string "x" via
    str("x") instead of str(x), (b) looped on the undefined name x_srt
    (immediate NameError), and (c) returned after the first match instead
    of counting every occurrence.
    """
    return str(x).count("1")
| [
"you@example.com"
] | you@example.com |
a7e8541c94ddc02f2d6400690083c360a1be0115 | e9fa62b4173b83ec0804329e7821a58624d3eb9f | /core/nodes/groupNode.py | c58a9a427c2692759591226e641652614218b466 | [] | no_license | UIKit0/meShaderEd | af5f0c4dc524b8be12ec6336a86c28439f1f3b1a | 683756f9c66d4613d89afdff4d1d014488b199f7 | refs/heads/master | 2021-01-17T21:16:27.037070 | 2013-12-26T12:30:26 | 2013-12-26T12:30:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | #===============================================================================
# groupNode.py
#===============================================================================
import os, sys
from PyQt4 import QtCore
from core.node import Node
from core.nodeParam import NodeParam
from core.nodeNetwork import NodeNetwork
from global_vars import app_global_vars, DEBUG_MODE
import gui.ui_settings as UI
#
# GroupNode
#
class GroupNode ( Node ) :
#
# __init__
#
def __init__ ( self, xml_node = None ) :
#
Node.__init__ ( self, xml_node )
if xml_node is None :
self.type = 'nodegroup'
self.name = self.label = self.type
self.nodenet = NodeNetwork ()
if DEBUG_MODE : print '>> GroupNode( %s ).__init__' % self.label
#
# copy
#
def copy ( self ) :
if DEBUG_MODE : print '>> GrouphNode( %s ).copy' % self.label
newNode = GroupNode ()
self.copySetup ( newNode )
return newNode
#
# copySetup
#
def copySetup ( self, newNode ) :
#
if DEBUG_MODE : print '>> GrouphNode( %s ).copySetup ' % self.label
Node.copySetup ( self, newNode )
newNode.nodenet = self.nodenet.copy ()
#
# computeNode
#
def computeNode ( self ) :
#
if DEBUG_MODE : print '>> GroupNode( %s ).computeNode' % self.label
# inside controlm_code, imageName value can be assigned from different
# input parameters
self.execControlCode ()
| [
"Yuri.Meshalkin@gmail.com"
] | Yuri.Meshalkin@gmail.com |
35a6990ff5ac68e8cbda235af7ac71de5d442e3d | 7d56ddd456613b77872598360494edd411366a79 | /tests/test_is_adjacent.py | adf62949215be3e2c359bd244b743528413b6fa4 | [] | no_license | andrewswan/lwotai | 3bf6629eaed86c945fdd65f221320bd743dc18ac | 33c6435ce3f0f85668cc5414cf78c1e9c614f6a9 | refs/heads/release | 2021-01-19T10:38:55.939281 | 2017-09-17T03:38:33 | 2017-09-17T03:38:33 | 87,885,218 | 1 | 0 | null | 2017-04-11T03:41:12 | 2017-04-11T03:41:12 | null | UTF-8 | Python | false | false | 620 | py | from labyrinth_test_case import LabyrinthTestCase
from lwotai.labyrinth import Labyrinth
class IsAdjacent(LabyrinthTestCase):
"""Test isAdjacent"""
def test_is_adjacent(self):
app = Labyrinth(1, 1, self.set_up_blank_test_scenario)
self.assertTrue(app.is_adjacent("Iran", "Iraq"))
self.assertTrue(app.is_adjacent("Germany", "Spain"))
self.assertTrue(app.is_adjacent("Libya", "Italy"))
self.assertTrue(app.is_adjacent("Benelux", "Russia"))
self.assertTrue(app.is_adjacent("Lebanon", "France"))
self.assertFalse(app.is_adjacent("United States", "Lebanon")) | [
"andrew.i.swan@gmail.com"
] | andrew.i.swan@gmail.com |
38d4016261da034fd1ad2793b4f25832785265f8 | 2670452749c6299386a33391f9fb5014db0203ec | /meraki/aio/api/mg_lan_settings.py | e71f434d300a7ebdeef9def563dca0cc0a6c628d | [
"MIT"
] | permissive | npappin-wsu/dashboard-api-python | f9d3fc682b517e6bac437cd54101afd09b653274 | 5aedfc740f676fbf34e5f79269e8ece73421e3da | refs/heads/master | 2020-06-28T17:49:44.911294 | 2020-04-14T04:27:38 | 2020-04-14T04:27:38 | 255,509,439 | 0 | 0 | MIT | 2020-04-14T04:24:55 | 2020-04-14T04:24:54 | null | UTF-8 | Python | false | false | 1,508 | py | class AsyncMGLANSettings:
def __init__(self, session):
super().__init__()
self._session = session
async def getDeviceCellularGatewaySettings(self, serial: str):
"""
**Show the LAN Settings of a MG**
https://api.meraki.com/api_docs#show-the-lan-settings-of-a-mg
- serial (string)
"""
metadata = {
'tags': ['MG LAN settings'],
'operation': 'getDeviceCellularGatewaySettings',
}
resource = f'/devices/{serial}/cellularGateway/settings'
return await self._session.get(metadata, resource)
async def updateDeviceCellularGatewaySettings(self, serial: str, **kwargs):
"""
**Update the LAN Settings for a single MG.**
https://api.meraki.com/api_docs#update-the-lan-settings-for-a-single-mg
- serial (string)
- reservedIpRanges (array): list of all reserved IP ranges for a single MG
- fixedIpAssignments (array): list of all fixed IP assignments for a single MG
"""
kwargs.update(locals())
metadata = {
'tags': ['MG LAN settings'],
'operation': 'updateDeviceCellularGatewaySettings',
}
resource = f'/devices/{serial}/cellularGateway/settings'
body_params = ['reservedIpRanges', 'fixedIpAssignments']
payload = {k: v for (k, v) in kwargs.items() if k in body_params}
return await self._session.put(metadata, resource, payload)
| [
"shiychen@cisco.com"
] | shiychen@cisco.com |
78de78847f7852c794ae07579f9696246ae7fe59 | 02bae0ffc4dea8e65cceb5ef49b8ec30ed3ab0fe | /week5/count_pairs.py | 92d77c4f651c0da25c5fb40badf33e4e6fa641f6 | [] | no_license | Gambrinius/Python_Course | 030ae41a129aa6a4ded06cc1ed9ca852bcf1c756 | 0d7529fa3dcc012b117241900d9d564d4096208b | refs/heads/master | 2020-03-06T16:01:33.322266 | 2018-06-15T17:49:40 | 2018-06-15T17:49:40 | 126,965,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | numList = list(map(int, input().split()))
# print(sum(numList.count(x) - 1 for x in numList) // 2)
counter = 0
for i in range(len(numList)):
for j in range(i + 1, len(numList)):
if numList[i] == numList[j]:
counter += 1
print(counter)
| [
"ilya.konon.95@gmail.com"
] | ilya.konon.95@gmail.com |
ec38145aaa3b9dba8286dd421d20ebdb7df5390f | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/list_authorized_db_users_request.py | 71cbb2d260e258c598a2112f6a4543e44ec3d044 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 6,237 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListAuthorizedDbUsersRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_language': 'str',
'instance_id': 'str',
'db_name': 'str',
'page': 'int',
'limit': 'int'
}
attribute_map = {
'x_language': 'X-Language',
'instance_id': 'instance_id',
'db_name': 'db-name',
'page': 'page',
'limit': 'limit'
}
def __init__(self, x_language=None, instance_id=None, db_name=None, page=None, limit=None):
"""ListAuthorizedDbUsersRequest
The model defined in huaweicloud sdk
:param x_language: 语言
:type x_language: str
:param instance_id: 实例ID。
:type instance_id: str
:param db_name: 数据库名。
:type db_name: str
:param page: 分页页码,从1开始。
:type page: int
:param limit: 每页数据条数。取值范围[1, 100]。
:type limit: int
"""
self._x_language = None
self._instance_id = None
self._db_name = None
self._page = None
self._limit = None
self.discriminator = None
if x_language is not None:
self.x_language = x_language
self.instance_id = instance_id
self.db_name = db_name
self.page = page
self.limit = limit
@property
def x_language(self):
"""Gets the x_language of this ListAuthorizedDbUsersRequest.
语言
:return: The x_language of this ListAuthorizedDbUsersRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this ListAuthorizedDbUsersRequest.
语言
:param x_language: The x_language of this ListAuthorizedDbUsersRequest.
:type x_language: str
"""
self._x_language = x_language
@property
def instance_id(self):
"""Gets the instance_id of this ListAuthorizedDbUsersRequest.
实例ID。
:return: The instance_id of this ListAuthorizedDbUsersRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ListAuthorizedDbUsersRequest.
实例ID。
:param instance_id: The instance_id of this ListAuthorizedDbUsersRequest.
:type instance_id: str
"""
self._instance_id = instance_id
@property
def db_name(self):
"""Gets the db_name of this ListAuthorizedDbUsersRequest.
数据库名。
:return: The db_name of this ListAuthorizedDbUsersRequest.
:rtype: str
"""
return self._db_name
@db_name.setter
def db_name(self, db_name):
"""Sets the db_name of this ListAuthorizedDbUsersRequest.
数据库名。
:param db_name: The db_name of this ListAuthorizedDbUsersRequest.
:type db_name: str
"""
self._db_name = db_name
@property
def page(self):
"""Gets the page of this ListAuthorizedDbUsersRequest.
分页页码,从1开始。
:return: The page of this ListAuthorizedDbUsersRequest.
:rtype: int
"""
return self._page
@page.setter
def page(self, page):
"""Sets the page of this ListAuthorizedDbUsersRequest.
分页页码,从1开始。
:param page: The page of this ListAuthorizedDbUsersRequest.
:type page: int
"""
self._page = page
@property
def limit(self):
"""Gets the limit of this ListAuthorizedDbUsersRequest.
每页数据条数。取值范围[1, 100]。
:return: The limit of this ListAuthorizedDbUsersRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListAuthorizedDbUsersRequest.
每页数据条数。取值范围[1, 100]。
:param limit: The limit of this ListAuthorizedDbUsersRequest.
:type limit: int
"""
self._limit = limit
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListAuthorizedDbUsersRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
4d2747bd95007d60e79741f0a86d8d7132ffc21f | 8ce5ad4d2c6fdf94f34ced61c87e080a54869924 | /ch5/buy_apple_orange.py | fd20dab3d4f17f9ab9957459224cd9f642057ba1 | [] | no_license | fuchami/zero-deep-learning | dabc70841a6ae5b92d9a353a47c10ee0a3d17609 | de423b143ca071530fa64b3efc18b92387932169 | refs/heads/master | 2020-07-03T10:35:37.085132 | 2019-09-06T07:50:40 | 2019-09-06T07:50:40 | 201,879,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | # coding:utf-8
from layer_naive import *
apple = 100
apple_num = 2
orange = 150
orange_num = 3
tax = 1.1
# layer
mul_apple_layer = MulLayer()
mul_orange_layer = MulLayer()
add_apple_orange = AddLayer()
mul_tax_layer = MulLayer()
# forward
apple_price = mul_apple_layer.forward(apple, apple_num)
orange_price = mul_orange_layer.forward(orange, orange_num)
all_price = add_apple_orange.forward(apple_price, orange_price)
price = mul_tax_layer.forward(all_price, tax)
# backward
dprice = 1
dall_price, dtax = mul_tax_layer.backward(dprice)
dapple_price, dorange_price = add_apple_orange.backward(dall_price)
dorange, dorange_num = mul_orange_layer.backward(dorange_price)
dapple, dapple_num = mul_apple_layer.backward(dapple_price)
print(price) # 715
print(dapple_num, dapple, dorange, dorange_num, dtax) # 110, 2.2, 3.3, 165, 650 | [
"famichiki.yuuki@gmail.com"
] | famichiki.yuuki@gmail.com |
ffdd91659d06d727143545bb500513b60ea0f9c5 | 4869f79cedcb8aef7f4d064bb8927ed3595c4c5e | /AnonymousGroupLogin/RegisterUser/RegisteringComponent/sleekxmpp/__init__.py | 1057895dfac385044b7f120be85a8ee7c23a037c | [] | no_license | mpetyx/xmpp-padgets-development | 622fef069e2b8f6beb15296b0d3fdd554d13535f | a0ca9ed2dd513f83ebb8cb4f4836708c82975713 | refs/heads/master | 2021-01-25T07:34:33.869597 | 2012-03-27T12:45:40 | 2012-03-27T12:45:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.basexmpp import BaseXMPP
from sleekxmpp.clientxmpp import ClientXMPP
from sleekxmpp.componentxmpp import ComponentXMPP
from sleekxmpp.stanza import Message, Presence, Iq
from sleekxmpp.xmlstream.handler import *
from sleekxmpp.xmlstream import XMLStream, RestartStream
from sleekxmpp.xmlstream.matcher import *
from sleekxmpp.xmlstream.stanzabase import StanzaBase, ET
from sleekxmpp.version import __version__, __version_info__
print "olo customies kanw! " | [
"mpetyx@gmail.com"
] | mpetyx@gmail.com |
588015cc847c31ca5a2a70db1437035557d1592b | 1284718203be50b23dcd1f6159746cfa42a04163 | /tensorflow_data/sawyer/online_data1_fine/conf.py | 27d247ce2a2ed6774e7a2e912beab6b918db7255 | [] | no_license | febert/robustness_via_retrying | 8fe4106d7705228ff339f9643518a80c0a243d36 | 1def282dc22f24b72c51ff1ef9ea1a7a83291369 | refs/heads/master | 2020-03-31T19:33:39.664525 | 2018-11-07T21:52:56 | 2018-11-07T21:52:56 | 152,502,702 | 17 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,367 | py | import os
current_dir = os.path.dirname(os.path.realpath(__file__))
# tf record data location:
DATA_BASE_DIR = '/'.join(str.split(current_dir, '/')[:-3]) + '/pushing_data'
BASE_DIR = '/'.join(str.split(current_dir, '/')[:-3])
# local output directory
OUT_DIR = current_dir + '/modeldata'
from python_visual_mpc.video_prediction.basecls.prediction_model_basecls import Base_Prediction_Model
configuration = {
'experiment_name': 'sna',
'pred_model': Base_Prediction_Model,
# 'test_data_dir': TEST_DATA_DIR, # 'directory containing data.' ,
'output_dir': OUT_DIR, #'directory for model checkpoints.' ,
'current_dir': current_dir, #'directory for writing summary.' ,
'num_iterations': 200000, #'number of training iterations.' ,
'resume_pretrained': '', # 'filepath of a pretrained model to resume training from.' ,
'data_dir':[DATA_BASE_DIR+ '/weiss_gripper_20k/train',DATA_BASE_DIR + '/online_data1/train'],
'test_data_ind':1,
'load_pretrained':BASE_DIR + '/tensorflow_data/sawyer/weissgripper_basecls_20k/modeldata/model96002',
'sequence_length': 14, # 'sequence length to load, including context frames.' ,
'skip_frame': 1, # 'use ever i-th frame to increase prediction horizon' ,
'context_frames': 2, # of frames before predictions.' ,
'use_state': 1, #'Whether or not to give the state+action to the model' ,
'model': 'CDNA', #'model architecture to use - CDNA, DNA, or STP' ,
'num_masks': 10, # 'number of masks, usually 1 for DNA, 10 for CDNA, STN.' ,
'schedsamp_k': 900.0, # 'The k hyperparameter for scheduled sampling -1 for no scheduled sampling.' ,
'train_val_split': 0.95, #'The percentage of files to use for the training set vs. the validation set.' ,
'batch_size': 32, #'batch size for training' ,
'learning_rate': 0.001, #'the base learning rate of the generator' ,
'visualize': '', #'load model from which to generate visualizations
'file_visual': '', # datafile used for making visualizations
'kern_size': 9, #size of DNA kerns
'sawyer':'',
'single_view':"",
'use_len':14, # number of steps used for training where the starting location is selected randomly within sequencelength
'1stimg_bckgd':'',
# 'visual_flowvec':'',
'adim':5,
'sdim':4,
'img_height':56,
'img_width':64,
'color_augmentation':"",
} | [
"sdasari@berkeley.edu"
] | sdasari@berkeley.edu |
126b9836584a362317e832bf74cc2bbc7a083f1b | 18d51ac0a6ca14c8221c26f0dacd8d3721ca28e9 | /hun59.py | baa5b5f1fb712f457c9f7d03882e9ee6df6b936d | [] | no_license | mahakalai/mahak | 05f96d52880ed7b2e5eb70dd1dbf14fc533236e8 | 613be9df7743ef59b1f0e07b7df987d29bb23ec7 | refs/heads/master | 2020-04-15T05:01:58.541930 | 2019-07-15T16:28:32 | 2019-07-15T16:28:32 | 164,406,486 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | n=int(input())
l=[int(x) for x in input().split()]
l2=[int(x) for x in input().split()]
c=[]
for i in range(len(l)):
s=l[i]+l2[i]
c.append(s)
print(*c)
| [
"noreply@github.com"
] | mahakalai.noreply@github.com |
ecfed22a55a201c3f82849c3350765d7d6ff4eba | 881041fab1b4d05f1c5371efed2f9276037eb609 | /tasks/gender-of-subject-officers-compared-to-new-york-city-police-department-demographics-2005-2009/depositor.py | 9a7c26eb798400b152d2480a759e5404a12e4edc | [] | no_license | ResidentMario/urban-physiology-nyc-catalog | b568f3b6ee1a887a50c4df23c488f50c92e30625 | cefbc799f898f6cdf24d0a0ef6c9cd13c76fb05c | refs/heads/master | 2021-01-02T22:43:09.073952 | 2017-08-06T18:27:22 | 2017-08-06T18:27:22 | 99,377,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | import requests
r = requests.get("https://data.cityofnewyork.us/api/views/jhq9-vaec/rows.csv?accessType=DOWNLOAD")
with open("/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/gender-of-subject-officers-compared-to-new-york-city-police-department-demographics-2005-2009/data.csv", "wb") as f:
f.write(r.content)
outputs = ["/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/gender-of-subject-officers-compared-to-new-york-city-police-department-demographics-2005-2009/data.csv"]
| [
"aleksey.bilogur@gmail.com"
] | aleksey.bilogur@gmail.com |
dc310ccf36c09943bcf439e31b5b6381c1913d7e | 52ab2da7b131643a344ee5344d8f35aebd6e2eed | /WebProject1/myvenv/lib/python3.6/site-packages/sqlalchemy/sql/__init__.py | 8968d2993b0f5da270970be936ef2021846d8b59 | [
"MIT"
] | permissive | ucsb-cs48-w19/5pm-findtheroommate | cd6db6c4cf3ee6f159b04456ba13b1ef684c7546 | d9d01b95c478e7493b5b32c8b56ceed00578b188 | refs/heads/master | 2020-04-16T01:00:16.617610 | 2019-03-19T20:42:38 | 2019-03-19T20:42:38 | 165,158,037 | 2 | 1 | MIT | 2019-03-05T00:46:12 | 2019-01-11T01:28:11 | Python | UTF-8 | Python | false | false | 3,753 | py | # sql/__init__.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .expression import Alias # noqa
from .expression import alias # noqa
from .expression import all_ # noqa
from .expression import and_ # noqa
from .expression import any_ # noqa
from .expression import asc # noqa
from .expression import between # noqa
from .expression import bindparam # noqa
from .expression import case # noqa
from .expression import cast # noqa
from .expression import ClauseElement # noqa
from .expression import collate # noqa
from .expression import column # noqa
from .expression import ColumnCollection # noqa
from .expression import ColumnElement # noqa
from .expression import CompoundSelect # noqa
from .expression import Delete # noqa
from .expression import delete # noqa
from .expression import desc # noqa
from .expression import distinct # noqa
from .expression import except_ # noqa
from .expression import except_all # noqa
from .expression import exists # noqa
from .expression import extract # noqa
from .expression import false # noqa
from .expression import False_ # noqa
from .expression import FromClause # noqa
from .expression import func # noqa
from .expression import funcfilter # noqa
from .expression import Insert # noqa
from .expression import insert # noqa
from .expression import intersect # noqa
from .expression import intersect_all # noqa
from .expression import Join # noqa
from .expression import join # noqa
from .expression import label # noqa
from .expression import lateral # noqa
from .expression import literal # noqa
from .expression import literal_column # noqa
from .expression import modifier # noqa
from .expression import not_ # noqa
from .expression import null # noqa
from .expression import nullsfirst # noqa
from .expression import nullslast # noqa
from .expression import or_ # noqa
from .expression import outerjoin # noqa
from .expression import outparam # noqa
from .expression import over # noqa
from .expression import quoted_name # noqa
from .expression import Select # noqa
from .expression import select # noqa
from .expression import Selectable # noqa
from .expression import subquery # noqa
from .expression import table # noqa
from .expression import TableClause # noqa
from .expression import TableSample # noqa
from .expression import tablesample # noqa
from .expression import text # noqa
from .expression import true # noqa
from .expression import True_ # noqa
from .expression import tuple_ # noqa
from .expression import type_coerce # noqa
from .expression import union # noqa
from .expression import union_all # noqa
from .expression import Update # noqa
from .expression import update # noqa
from .expression import within_group # noqa
from .visitors import ClauseVisitor # noqa
def __go(lcls):
global __all__
from .. import util as _sa_util
import inspect as _inspect
__all__ = sorted(
name
for name, obj in lcls.items()
if not (name.startswith("_") or _inspect.ismodule(obj))
)
from .annotation import _prepare_annotations
from .annotation import Annotated # noqa
from .elements import AnnotatedColumnElement
from .elements import ClauseList # noqa
from .selectable import AnnotatedFromClause # noqa
_prepare_annotations(ColumnElement, AnnotatedColumnElement)
_prepare_annotations(FromClause, AnnotatedFromClause)
_prepare_annotations(ClauseList, Annotated)
_sa_util.dependencies.resolve_all("sqlalchemy.sql")
from . import naming # noqa
__go(locals())
| [
"tengyue@umail.ucsb.edu"
] | tengyue@umail.ucsb.edu |
bfa7b1112caec573c39a7a869fab1368f335267a | 14c9f543d540d318d30d7acffb636e47b5d62f0a | /ctrl/ctrl/session_parameters/temi/robot_axioms.py | 3af95e75a3eae08a9f59e0ac3f2ffa37f12f4be8 | [] | no_license | Wisc-HCI/Figaro | cecd71d0f179bcfe413b657e9a8dc02be015eff6 | 20ae549dc53064d3d4f203e623e71220a3cde1e7 | refs/heads/master | 2023-04-27T11:40:02.969537 | 2021-05-19T16:26:12 | 2021-05-19T16:26:12 | 358,723,686 | 0 | 1 | null | 2021-05-17T20:54:53 | 2021-04-16T21:29:01 | Python | UTF-8 | Python | false | false | 3,917 | py | class RobotAxioms:
def __init__(self):
pass
def ensure_position_and_movement_overlap(self,moments):
# if movement is currently True and position is SOMETHING, and (1) the next movement is False and (2) the next position is ["X"], then add "movement" to the nexr position
for i in range(len(moments)-1):
curr_moment = moments[i]
next_moment = moments[i+1]
if curr_moment.tracks["movement"] == ["True"] and next_moment.tracks["movement"] != ["True"]:
curr_position = curr_moment.tracks["position"]
next_position = next_moment.tracks["position"]
if set(curr_position) != set(next_position):
next_moment.tracks["movement"] = ["True"]
def process_position_movement(self, moments):
for i in range(len(moments)):
moment = moments[i]
for human in ["h1"]:
if moment.tracks["position"] is not None:
for item in moment.tracks["position"]:
if human in item:
moment.tracks["close_to_human"] = True
moment.tracks["position"].remove(item)
# combine robot position and movement
if moment.tracks["movement"] == ["True"]:
if moment.tracks["position"] is None:
moment.tracks["position"] = ["movement"]
else:
moment.tracks["position"].append("movement")
'''
# look ahead to see if the next position is not movement
if i < len(moments) - 1:
if moments[i+1].tracks["movement"] is None:
lookahead_position = moments[i+1].tracks["position"]
if lookahead_position is not None:
for pos in lookahead_position:
#Discard position from human (it is redundant info)
detected_human_position = False
for human in ["h1"]:
if human in pos:
detected_human_position = True
if not detected_human_position:
moment.tracks["position"].append(pos)
'''
# combine human position and movement
# TODO: remove this
for human in ["h1"]:
if moment.tracks["{}_position".format(human)] is not None and any("robot" in string for string in moment.tracks["{}_position".format(human)]):
moment.tracks["{}_near_rob".format(human)] = True
moment.tracks["{}_movement".format(human)] = None
moment.tracks["{}_position".format(human)] = None
def remove_unrecognizable_objects_or_regions(self, moments, objects, regions):
# objects and regions are lists of tuples at the moment containing both name and coordinate data
# must extract only the name
'''
obj_name_list = []
for obj in objects:
obj_name_list.append(obj[0])
print(obj_name_list)
exit()
'''
####################################
for moment in moments:
if moment.tracks["position"] is not None:
to_remove = []
for pos in moment.tracks["position"]:
#print("considering {}".format(pos))
if pos in objects:
#print("removing {}".format(pos))
to_remove.append(pos)
for pos in to_remove:
moment.tracks["position"].remove(pos)
def axiom_only_final_movement_destination_matters(self,moments):
movement_started = False
movement_moments = []
for moment in moments:
if not movement_started and moment.tracks["movement"] == ["True"]:
movement_started = True
movement_moments.append(moment)
elif movement_started and moment.tracks["movement"] != ["True"]:
movement_started = False
# process movement moments
movement_moments.reverse()
init_pos = movement_moments[0].tracks["position"]
for mv in movement_moments:
if mv.tracks["position"] != init_pos:
if mv.tracks["position"] is not None:
to_remove = []
for item in mv.tracks["position"]:
if "h1" not in item:
to_remove.append(item)
for item in to_remove:
mv.tracks["position"].remove(item)
if len(mv.tracks["position"]) == 0:
mv.tracks["position"] = None
movement_moments = []
elif movement_started:
movement_moments.append(moment) | [
"dporfirio@wisc.edu"
] | dporfirio@wisc.edu |
42a5eabd43a28f32e8d007c07ca5aae29e454e35 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/1165/codes/1756_1576.py | 83c8a894e2fa689875e700b54fc225283acbf3c8 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | from numpy import*
#Sequencia de jogadas de Eusápia
v1 = array(eval(input("Insira a sequencia: ")))
#Sequencia dejogadas de Barsanulfo
v2 = array(eval(input("Insira a sequencia: ")))
i = 0
ve = 0
vb = 0
while(i < size(v1)):
if(((v1[i]== 11) and (v2[i]==33)) or ((v1[i]==22) and (v2[i]==11)) or ((v1[i]==33) and (v2[i]==22))):
ve = ve + 1
elif(((v2[i]==11) and (v1[i]==33)) or ((v2[i]==22) and (v1[i]==11)) or ((v2[i]==33) and (v1[i]==22))):
vb = vb + 1
i = i + 1
print(i)
if(ve > vb):
print("EUSAPIA")
elif(ve < vb):
print("BARSANULFO")
elif(ve == vb):
print("EMPATE") | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
03901fefe164445132e86bddd2aa7e411d6aeea7 | 9124e66c8ec04e61537473437a92b53daa32ce20 | /rojas/app19.py | 65d6e760247d6d6473079299176ccd573657bac7 | [] | no_license | erick1984linares/t10_linares_rojas | 28618baccb3472fb8d48b34f5d1107b702c399d0 | ba9462b3b881dbd3665907a7a33c4c7d80aa4251 | refs/heads/master | 2020-12-04T06:38:06.929626 | 2020-01-10T11:52:29 | 2020-01-10T11:52:29 | 231,661,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | from Rojas import libreria
oppcn=0
limit=3
while (oppcn != limit):
print("########################")
print("# MENU #")
print("########################")
print("# 1. Agregar anotacion #")
print("# 2. Ver agregados #")
print("# 3. Salir #")
print("########################")
oppcn=libreria.pedir_numero("Ingrese la opcion deseada: ",1,3)
if (oppcn == 1):
if (oppcn == 2):
print("fin del programa")
| [
"ylinares@unprg.edu.pe"
] | ylinares@unprg.edu.pe |
5856fc7d02728c47ba6b6baf94230d6063b3e862 | 55c64c7a78c5f78f35f84ae1fce60d387ba11040 | /bc4py/chain/workhash.py | 9d2a80975c3daa597969793d516bf0fa1df853e0 | [
"MIT"
] | permissive | kmn/bc4py | df580de46456fed860e7fc0a812f5b46e04483c3 | 8f9ee09ed1325faad8c361a9b6c1b5abbc93cbc1 | refs/heads/master | 2020-04-13T06:04:32.273534 | 2018-12-18T02:48:41 | 2018-12-18T02:48:41 | 163,010,903 | 0 | 0 | null | 2018-12-24T17:16:21 | 2018-12-24T17:16:21 | null | UTF-8 | Python | false | false | 7,130 | py | from bc4py.config import C, BlockChainError
from multiprocessing import get_context, current_process
import threading
import logging
from os import urandom
from time import time, sleep
from yespower import hash as yespower_hash # for CPU
from x11_hash import getPoWHash as x11_hash # for ASIC
from hmq_hash import getPoWHash as hmq_hash # for GPU
from litecoin_scrypt import getPoWHash as ltc_hash # for ASIC
from shield_x16s_hash import getPoWHash as x16s_hash # for GPU
from pooled_multiprocessing import cpu_num
mp_generator = list()
mp_lock = threading.Lock()
def self_check_hash_fnc():
check_hash = b'\x00' * 80
check_list = [
(yespower_hash, b'z\x1b\xde\x0c\x01\xec\xc1\xd3\xdf\x86{\xb2;\x97>\xee\xbc\x96\xfd'
b'\x83[\x14sv\xca\xe9\xf9\xa7\x04t\xe0F'),
(x11_hash, b'\x83(\x84a\x80\x96[\xceV\xf6\x1e\x01]\xb6*\xf5b\xa6\x11\xd8^^r\x1d\x85L\x8d\x97\xe4z>\xa3'),
(hmq_hash, b'\xf9\xf2~\xbc\x96=\xe0\xed\xff\xd0\xd3&\xe5\xab&\xea\xe1\xec'
b'\x0f\x031\n\xdf\x12\xf1b zT\xeb\xd6\x86'),
(ltc_hash, b'\x16\x1d\x08v\xf3\xb9;\x10H\xcd\xa1\xbd\xea\xa73.\xe2\x10\xf7'
b'\x13\x1bB\x01<\xb49\x13\xa6U:Ki'),
(x16s_hash, b'\xcc\xa6\x1bVE\xd4\xcez3\x9b\xbf\xba\x80\x05\xeb\xd3\xa5\x86\x9bW'
b'\x01\xf8\xb6\xe5a\xc3\x9e\xd9\x8c\xca\x02\x1a')]
for hash_fnc, correct_hash in check_list:
if hash_fnc(check_hash) != correct_hash:
raise Exception('self check failed, hash module "{}".'.format(hash_fnc.__module__))
def get_workhash_fnc(flag):
if flag == C.BLOCK_YES_POW:
return yespower_hash
elif flag == C.BLOCK_X11_POW:
return x11_hash
elif flag == C.BLOCK_HMQ_POW:
return hmq_hash
elif flag == C.BLOCK_LTC_POW:
return ltc_hash
elif flag == C.BLOCK_X16R_POW:
return x16s_hash
elif flag in C.consensus2name:
raise Exception('Not found block flag {}'.format(C.consensus2name[flag]))
else:
raise Exception('Not found block flag {}?'.format(flag))
def update_work_hash(block):
if block.flag == C.BLOCK_GENESIS:
block.work_hash = b'\xff' * 32
elif block.flag == C.BLOCK_POS:
proof_tx = block.txs[0]
if proof_tx.pos_amount is None:
from bc4py.database.builder import tx_builder
txhash, txindex = proof_tx.inputs[0]
output_tx = tx_builder.get_tx(txhash)
if output_tx is None:
raise BlockChainError('Not found output {} of {}'.format(proof_tx, block))
address, coin_id, amount = output_tx.outputs[txindex]
proof_tx.pos_amount = amount
block.work_hash = proof_tx.get_pos_hash(block.previous_hash)
else:
# POW_???
hash_fnc = get_workhash_fnc(block.flag)
block.work_hash = hash_fnc(block.b)
def generate_many_hash(block, how_many):
assert block.flag != C.BLOCK_POS and block.flag != C.BLOCK_GENESIS
assert how_many > 0
# hash generating with multi-core
start = time()
with mp_lock:
f_wait = False
while True:
free_process = list()
for hash_generator in mp_generator:
if not hash_generator.lock.locked():
free_process.append(hash_generator)
if len(free_process) > 0:
break
else:
f_wait = True
sleep(0.05)
if f_wait:
logging.debug("Wait for free_process for mining... {}mSec"
.format(int((time()-start)*1000)))
request_num = how_many // len(free_process)
# throw task
for hash_generator in free_process:
hash_generator.generate(block, request_num)
block_b = None
work_hash = None
work_hash_int = 0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
for hash_generator in free_process:
tmp_block_b, check_hash = hash_generator.result()
check_int = int.from_bytes(check_hash, 'little')
if check_int < work_hash_int:
block_b = tmp_block_b
work_hash = check_hash
work_hash_int = check_int
block.b = block_b
block.work_hash = work_hash
block.deserialize()
return time() - start
def start_work_hash(process=None):
if current_process().name != 'MainProcess':
raise Exception('Is not main process!')
if len(mp_generator) != 0:
raise Exception('Already mp_generator is filled.')
if process is None:
process = cpu_num
for index in range(1, process + 1):
# Want to use 1 core for main-thread
hash_generator = HashGenerator(index=index)
hash_generator.start()
mp_generator.append(hash_generator)
def close_work_hash():
    """Terminate every hashing worker and empty the worker pool."""
    for worker in mp_generator:
        worker.close()
    mp_generator.clear()
    logging.debug("Close hashing process.")
def _pow_generator(pipe):
    # Worker-process loop: receives (binary, block_flag, how_many) jobs over
    # *pipe*, brute-forces `how_many` nonce variations, and sends back the
    # (binary, hash) pair whose hash has the smallest little-endian value.
    binary = None
    while True:
        try:
            binary, block_flag, how_many = pipe.recv()
            hash_fnc = get_workhash_fnc(block_flag)
            hashed = hash_fnc(binary)
            minimum_num = int.from_bytes(hashed, 'little')
            new_binary = binary
            for i in range(how_many):
                # Randomize only the trailing 4 bytes (the nonce) each attempt.
                new_binary = new_binary[:-4] + urandom(4)
                new_hash = hash_fnc(new_binary)
                new_num = int.from_bytes(new_hash, 'little')
                if minimum_num > new_num:
                    binary = new_binary
                    hashed = new_hash
                    minimum_num = new_num
            pipe.send((binary, hashed))
        except Exception as e:
            # On failure send an error *string*; the parent distinguishes
            # success/failure by the payload type (tuple vs str).
            msg = "Hashing failed {} by \"{}\"".format(binary, e)
            try:
                pipe.send(msg)
            except Exception as e:
                # The pipe itself is broken: the parent is gone, stop the worker.
                logging.info("Close by pipe error, {}".format(e))
                return
class HashGenerator:
    """One spawned mining worker process plus the duplex pipe used to
    exchange jobs and results with it.

    ``lock`` is held between ``generate()`` and ``result()``; the caller
    (``generate_many_hash``) polls ``lock.locked()`` to find idle workers.
    """

    def __init__(self, index):
        self.index = index
        # 'spawn' start method: child inherits no parent state (portable).
        cxt = get_context('spawn')
        parent_conn, child_conn = cxt.Pipe(duplex=True)
        self.process = cxt.Process(
            target=_pow_generator, name="Hashing{}".format(index), args=(child_conn,))
        self.process.daemon = True
        self.parent_conn = parent_conn
        self.lock = threading.Lock()

    def start(self):
        self.process.start()
        logging.info("Start work hash gene {}".format(self.index))

    def close(self):
        if self.process.is_alive():
            self.process.terminate()
        self.parent_conn.close()

    def generate(self, block, how_many):
        # Mark this worker busy, then hand the job to the child process;
        # the matching result() call releases the lock.
        self.lock.acquire()
        self.parent_conn.send((block.b, block.flag, how_many))

    def result(self):
        data = self.parent_conn.recv()
        self.lock.release()
        if isinstance(data, tuple):
            return data
        else:
            # The worker sends a string payload when hashing failed.
            raise BlockChainError('Unknown status on pipe {}'.format(data))
# Sanity-check the hashing backends once at import time.
self_check_hash_fnc()


# Public API of this module.
__all__ = [
    "get_workhash_fnc",
    "start_work_hash",
    "update_work_hash",
    "generate_many_hash",
    "close_work_hash"
]
| [
"thhjuu@yahoo.co.jp"
] | thhjuu@yahoo.co.jp |
2f41fce2486a6fd898fa969a55cd13b94650392e | 882c865cf0a4b94fdd117affbb5748bdf4e056d0 | /python/BOJ/08_DP/1915_가장큰정사각형.py | 553f36b68765d60dd1500395a58dfec90db8a5c6 | [] | no_license | minhee0327/Algorithm | ebae861e90069e2d9cf0680159e14c833b2f0da3 | fb0d3763b1b75d310de4c19c77014e8fb86dad0d | refs/heads/master | 2023-08-15T14:55:49.769179 | 2021-09-14T04:05:11 | 2021-09-14T04:05:11 | 331,007,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | n, m = map(int, input().split())
# n rows and m columns were read above.  Classic DP for the largest
# all-ones square: side(i, j) = 1 + min(up, left, up-left) on filled cells;
# the answer is the area of the best side length.
arr = [[0] * (m + 1) for _ in range(n + 1)]
DP = [[0] * (m + 1) for _ in range(n + 1)]
ans = 0
for r in range(n):
    # Each input row is a string of digits; store 1-indexed for easy borders.
    for c, cell in enumerate(list(map(int, list(input())))):
        arr[r + 1][c + 1] = cell
for r in range(1, n + 1):
    for c in range(1, m + 1):
        if arr[r][c]:
            DP[r][c] = 1 + min(DP[r - 1][c], DP[r][c - 1], DP[r - 1][c - 1])
            ans = max(ans, DP[r][c])
print(ans ** 2)
| [
"queen.minhee@gmail.com"
] | queen.minhee@gmail.com |
9e81fd44c79e908e7b9db8f27e74f9b6311874d5 | 2c7de112498f65c7b6a74888490266ec6c772e47 | /PDSim/core/journal_bearing.py | 04b5ebc907b447af1da97d9e2d092cf83768c42a | [] | no_license | bansal16/pdsim | 16c83dfc1af9816c369e07b6ef50e74658359c22 | 9098086c558d6b23c25d1b9e45ea86186905f41a | refs/heads/master | 2020-12-31T01:48:08.672061 | 2015-04-08T02:14:09 | 2015-04-08T02:14:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,225 | py | from __future__ import division
import numpy as np
from math import pi, atan
import scipy.optimize
import matplotlib.pyplot as plt
N = 61
#e_mat=[0.2,0.25,0.3,0.35,0.4,0.5,0.6,0.7,0.8,0.9];
phi_star = pi
def TwoDGriddedIntegrate(I, N):
    """Sum the corner-averaged cell values of the N x N gridded field *I*.

    Each of the (N-1) x (N-1) cells contributes the mean of its four corner
    nodes.  Equivalent to the original, but the redundant nested
    ``np.sum(np.sum(...))`` (the inner call already reduces a 2-D array to
    a scalar) is collapsed into a single reduction.
    """
    corners = I[0:N-1, 0:N-1] + I[1:N, 0:N-1] + I[0:N-1, 1:N] + I[1:N, 1:N]
    return np.sum(corners) / 4
def TwoDGriddedIntegrate2(PHI, Y, I):
    """Trapezoidal integration of *I* over the (PHI, Y) mesh.

    Integrate along the phi direction (axis 0) for each y, then do a
    trapezoidal integration of those line integrals over y (the y samples
    are taken from the second row of the *Y* mesh).

    Changes vs. the original: the leftover debug ``plt.plot``/``plt.show``
    pair (which blocked execution inside a numeric helper until the figure
    window was closed) was removed, and ``np.trapezoid`` is used when
    available because ``np.trapz`` was removed in NumPy 2.0.
    """
    trapz = getattr(np, 'trapezoid', None)
    if trapz is None:  # NumPy < 2.0
        trapz = np.trapz
    return trapz(trapz(I, PHI, axis=0), Y[1, :])
def OBJECTIVE(phi_star, epsilon, plot = False, output = False):
    # Newton relaxation for the dimensionless pressure field P(phi, y) of a
    # journal bearing at eccentricity ratio *epsilon*, on the domain
    # phi in [0, phi_star], y in [0, 1] with N x N nodes (module global N).
    # Returns a one-sided 2nd-order estimate of dP/dphi at phi = phi_star
    # (mid-width), which the caller drives to zero to find phi_star.
    # NOTE(review): Python 2 code (print statements) -- appears to be a
    # finite-difference Reynolds-equation solve; confirm against the model
    # writeup before refactoring.
    PHI = np.tile(np.linspace(0,phi_star,N).T,(N,1)).T
    Y = np.tile(np.linspace(0,1,N),(N,1))
    dPHI = phi_star/(N-1)
    dY = 1/(N-1)
    sinPHI=np.sin(PHI)
    P = 0*PHI
    Pnew = 0*PHI
    f = 0*PHI
    df = 0*PHI
    _lambda = 1
    change = 999
    eps=1e-6;
    count=0;
    # Pointwise Newton sweep over the interior nodes until the largest
    # update falls below eps.
    while (change>eps):
        #Calculate geometric parameters
        H=1+epsilon*np.cos(PHI);
        H3=H**3;
        #Coefficients
        A=H3[2:N,1:N-1]
        B=H3[0:N-2,1:N-1]
        C=H3[1:N-1,1:N-1]
        #Calculate residuals
        f[1:N-1,1:N-1] = -(4*A+4*B+2*_lambda*dPHI**2/dY**2*C)*P[1:N-1,1:N-1]+(3*A+B)*P[2:N,1:N-1]+(A+3*B)*P[0:N-2,1:N-1]+(_lambda**2*dPHI**2/dY**2*C)*(P[1:N-1,2:N]+P[1:N-1,0:N-2])+24*dPHI**2*epsilon*sinPHI[1:N-1,1:N-1]
        #Calculate derivative
        df[1:N-1,1:N-1]=-(4*A+4*B+2*_lambda*dPHI**2/dY**2*C);
        #Evaluate P_new=P_old-f/dfdP
        P[1:N-1,1:N-1]=P[1:N-1,1:N-1]-f[1:N-1,1:N-1]/df[1:N-1,1:N-1];
        #Evaluate change
        change=np.max(np.max(np.abs(f[1:N-1,1:N-1]/df[1:N-1,1:N-1])));
        # Progress trace every 1000 sweeps (Python 2 print statement).
        if count % 1000 == 0:
            print change
        count += 1
    if output:
        # Load components by integrating the pressure field, then derived
        # quantities (attitude angle, Sommerfeld-like number, friction).
        Wx=dY*dPHI*np.sum(np.sum(np.sin(PHI)*P))
        Wz=-dY*dPHI*np.sum(np.sum(np.cos(PHI)*P))
        Wr = np.sqrt(Wx**2+Wz**2)
        PHI_angle = atan(Wx/Wz)
        B_j = 1/(pi*Wr)
        DPDPHI = 0*Y
        DPDPHI[0:N-2,0:N] = (P[1:N-1,0:N]-P[0:N-2,0:N])/(dPHI)
        # NOTE(review): the slice N-1:N-1 below is empty, so this line is a
        # no-op -- presumably the boundary row was meant; confirm.
        DPDPHI[N-1:N-1,0:N] = (P[N-1:N,0:N]-P[N-2:N-2,0:N])/(dPHI)
        integrand = 1/H
        #integrand = H/2*DPDPHI+1/H
        Fb1 = dPHI*dY*np.sum(np.sum(integrand))
        Fb2 = dPHI*dY*TwoDGriddedIntegrate(integrand,N)
        Fb3 = TwoDGriddedIntegrate2(PHI,Y,integrand)
        mu_rb_c = Fb3/Wr # mu*r_b/c
        print 'Fb1,Fb2,Fb3',Fb1,Fb2,Fb3
        print 'B_j', B_j
        print 'mu*rb/c', mu_rb_c
        #print 'mu*rb/c', mu_rb_c/12.8
        print 'PHI_angle', PHI_angle/pi*180
        plt.contour(PHI,Y,H/2*DPDPHI+1/H)
        plt.show()
    if plot:
        plt.contour(PHI,Y,P,30)
        plt.show()
    return np.sum(3*P[N-1,N//2+1]-4*P[N-2,N//2+1]+P[N-3,N//2+1])/(2*dPHI)
if __name__=='__main__':
#print scipy.optimize.newton.__doc__; quit()
phi_star = scipy.optimize.newton(OBJECTIVE, pi, args = (0.6,), tol = 0.004)
OBJECTIVE(phi_star,0.6,plot = True, output = True) | [
"ian.h.bell@gmail.com"
] | ian.h.bell@gmail.com |
146eda27bba2e7af27bc4756c5c0cd8650510af9 | 60e38d3122cfb18cf8901e0d7fba02ef2a32affa | /notebooks/converted_notebooks/rotate_and_crop_images.py | 3f4def955ba5f7466af281090a2d737a65fe3d19 | [
"BSD-3-Clause"
] | permissive | earnestdl/python_notebooks | ac11b40d9d5e721b947b083b2f4c301079f206a8 | 4ef31711b70b90cf621e9e9d094fa2a43eeeae16 | refs/heads/master | 2023-03-12T19:41:44.229158 | 2021-02-22T15:41:57 | 2021-02-22T15:41:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,223 | py | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [](https://neutronimaging.pages.ornl.gov/tutorial/notebooks/rotate_and_crop_images)
#
# <img src='__docs/__all/notebook_rules.png' />
# # Select Your IPTS
# +
from __code.ui_builder import UiBuilder
o_builder = UiBuilder(ui_name = 'ui_rotate_and_crop.ui')
from __code.load_images import LoadImages
from __code.rotate_and_crop_images import RotateAndCropImages, Export
from __code import system
system.System.select_working_dir()
from __code.__all import custom_style
custom_style.style()
# + run_control={"frozen": false, "read_only": false}
# %gui qt
# + [markdown] run_control={"frozen": false, "read_only": false}
# # Select and Load Working Images
# + [markdown] run_control={"frozen": false, "read_only": false}
# Select the images (tiff or fits) you want to crop and/or rotate
# + run_control={"frozen": false, "read_only": false}
o_load = LoadImages(working_dir=system.System.get_working_dir())
o_load.select_images(use_next=True)
# + [markdown] run_control={"frozen": false, "read_only": false}
# # Select crop region and/or rotation angle
# + run_control={"frozen": false, "read_only": false}
list_images = o_load.list_images
o_crop = RotateAndCropImages(o_load = o_load)
o_crop.show()
# + [markdown] run_control={"frozen": false, "read_only": false}
# # Export Images
# + run_control={"frozen": false, "read_only": false}
rotated_working_data = o_crop.rotated_working_data
rotation_angle = o_crop.rotation_angle
o_output_folder = Export(working_dir=system.System.get_working_dir(),
data=rotated_working_data,
list_files=list_images,
rotation_angle=rotation_angle)
o_output_folder.select_folder()
# + [markdown] run_control={"frozen": false, "read_only": false}
# Cleaning notebook memory
# + run_control={"frozen": false, "read_only": false}
try:
del o_crop
del o_load
except:
pass
# -
| [
"bilheuxjm@ornl.gov"
] | bilheuxjm@ornl.gov |
204a241e69f547a1d9258401f479b7a26b973865 | c38597764dba09207302358901ff74c54d6bdd0d | /cmd3/plugins/pause.py | 2db4f149972a8ac97316c00634d84086ff18aec7 | [
"Apache-2.0"
] | permissive | futuregrid/cmd3 | 29ab97d939f8c99b02a8515ce02d7e517ca6788f | 266bf073457165679d918c238a220a3136f41ed5 | refs/heads/master | 2021-01-02T09:26:36.376760 | 2015-03-28T15:45:07 | 2015-03-28T15:45:07 | 8,934,650 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | from cmd3.shell import command
class pause:
    # cmd3 plugin class exposing the shell command ``pause``.

    def activate_pause(self):
        # Activation hook called by the cmd3 plugin loader; nothing to set up.
        pass

    @command
    def do_pause(self, arg, arguments):
        """
        Usage:
           pause [MESSAGE]
        Displays the specified text then waits for the user to press RETURN.
        Arguments:
          MESSAGE message to be displayed
        """
        # NOTE(review): the docstring above is parsed by the @command
        # decorator for the usage/arguments spec, so its wording is part of
        # the command's behaviour -- left byte-identical.
        # Python 2 raw_input(): blocks until RETURN is pressed.
        raw_input(arg + '\n')
| [
"laszewski@gmail.com"
] | laszewski@gmail.com |
bbc7675acc7ac35bcfd976febc56886686fd3b6c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_317/ch19_2020_10_07_12_53_37_322833.py | b1a16cdb8cabc49234e4f11bf87e85323c500893 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | def classifica_triangulo(a,b,c):
if a == b and b==c and c==a:
return "equilátero"
elif a!=b and b!=c and c!=a:
return "escaleno"
elif a==b and b==c and c!=a:
return "isósceles" | [
"you@example.com"
] | you@example.com |
f69dafe7e3cd2bba8f46924f56fc36ccaeb49bb1 | a3d0e224a2a8487f8d271991b3cf1981b9e58791 | /python/PoissonDistribuition/solution/PoissonDistribII.py | 4c246a2f1086dd71d25a654cf77052b7b7aca74e | [] | no_license | chrislucas/hackerrank-10-days-of-statistics | c66306f55ca7e0080cecebfed497b5032f8a0007 | 94fce754274ad706b44b06f9d6ff8d96838c80d0 | refs/heads/master | 2020-03-25T23:48:11.992249 | 2018-12-05T17:35:21 | 2018-12-05T17:35:21 | 144,293,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | '''
https://www.hackerrank.com/challenges/s10-poisson-distribution-2/problem
'''
from math import e as E
def factorial(n):
    """Return n! computed iteratively (1 for n <= 1)."""
    product = 1
    for factor in range(2, n + 1):
        product *= factor
    return product
def poisson_distribution(success, avg):
    """P(X = success) for a Poisson random variable with mean *avg*."""
    numerator = (avg ** success) * (E ** (-avg))
    return numerator / factorial(success)
'''
a = 0.88
b = 1.55
'''
def run():
    """Read the two Poisson means and print the expected daily costs."""
    mean_a, mean_b = map(float, input().split(" "))
    # E[X^2] = mean + mean^2 for a Poisson variable, hence the (m + m*m) term.
    cost_a = 160 + 40 * (mean_a + mean_a * mean_a)
    cost_b = 128 + 40 * (mean_b + mean_b * mean_b)
    print("%.3f\n%.3f" % (cost_a, cost_b))
# NOTE(review): run() executes at import time; the __main__ guard below is
# an empty placeholder.
run()
if __name__ == '__main__':
    pass
| [
"christoffer.luccas@gmail.com"
] | christoffer.luccas@gmail.com |
736fc586eeb80e5b1b5a212f088fc98d4a063837 | 9f1039075cc611198a988034429afed6ec6d7408 | /tensorflow-stubs/python/ops/array_grad.pyi | 0ce04ead67f2347cf2ccd72e01a31d5ad3c5b6c6 | [] | no_license | matangover/tensorflow-stubs | 9422fbb1cb3a3638958d621461291c315f9c6ec2 | 664bd995ef24f05ba2b3867d979d23ee845cb652 | refs/heads/master | 2020-05-23T12:03:40.996675 | 2019-05-15T06:21:43 | 2019-05-15T06:21:43 | 186,748,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | pyi | # Stubs for tensorflow.python.ops.array_grad (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.python import pywrap_tensorflow as pywrap_tensorflow
from tensorflow.python.eager import context as context
from tensorflow.python.framework import constant_op as constant_op, ops as ops, sparse_tensor as sparse_tensor, tensor_util as tensor_util
from tensorflow.python.ops import array_ops as array_ops, control_flow_util as control_flow_util, gen_array_ops as gen_array_ops, math_ops as math_ops, sparse_ops as sparse_ops
| [
"matangover@gmail.com"
] | matangover@gmail.com |
5c899471a1f6911eedcac4f5185958ee38057e03 | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/blis/tests/common.py | 643d09ec1fd370778e9270a7a94dc2d2f13b6eea | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:9047f9d9f85d18d5f16a141581ac46738f83c8e1b9d1ceff33a90c06e9979143
size 2577
| [
"yamprakash130@gmail.com"
] | yamprakash130@gmail.com |
aac470ddd0fd3c5e81d63ee500ed64596d8d060c | d9b2805a8b39f147bd77e35c8e96e0cbd5eaa726 | /학교 공부/3학년1학기/운영체제 - 김정준/텀과제/osProject_피드.py | 28c84c62698c8a349e6894f628b2c94152b2f5c3 | [] | no_license | LeeInHaeng/Study | ca8e3e2d4111dc3f742eefea541a67739d729e75 | 96bdb1d224702cebb8a6de6bbd596b075ee33f7b | refs/heads/master | 2020-03-28T11:03:03.848316 | 2019-04-20T08:33:26 | 2019-04-20T08:33:26 | 148,172,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,975 | py | import threading
import random
import time
customCnt = int(input("전체 고객의 수를 입력하세요 : "))  # total number of customers
bank = int(input("은행 창구의 수를 입력하세요 : "))  # number of bank counters
sem = threading.Semaphore(bank) # semaphore limiting concurrent counters to `bank` threads
vip_sem = threading.Semaphore(1)  # VIPs are served strictly one at a time
class RestrictedArea(threading.Thread):
    # One regular customer: waits for a free bank counter (semaphore `sem`)
    # and occupies it for a random 5-10 second consultation.

    def run(self):
        # self.getName() : Thread-1, Thread-2 ....
        custom = self.getName().replace("Thread","Custom")
        CounselingTime = random.randint(5,10) # consultation takes 5-10 seconds
        msg =('[-]%s 상담중...\n' % custom)
        msg2 = ('[+]%s 상담 완료... / 상담 소요시간 : %d초\n' % (custom,CounselingTime))
        sem.acquire() # unlocked --> locked
        print(msg) # critical section: at most `bank` threads at a time
        time.sleep(CounselingTime)
        sem.release() # locked --> unlocked
        print(msg2)
class RestrictedAreaVIP(threading.Thread):
    # One VIP customer: same flow as RestrictedArea but serialized through
    # the dedicated single-slot `vip_sem` semaphore.

    def run(self):
        # self.getName() : Thread-1, Thread-2 ....
        vip = self.getName().replace("Thread","[[ VIP ]]")
        CounselingTime = random.randint(5,10) # consultation takes 5-10 seconds
        msg =('[[[ [-]%s 상담중... ]]]\n' % vip)
        msg2 = ('[[[ [+]%s 상담 완료... / 상담 소요시간 : %d초 ]]]\n' % (vip,CounselingTime))
        vip_sem.acquire() # unlocked --> locked
        print(msg) # critical section: one VIP at a time
        time.sleep(CounselingTime)
        vip_sem.release() # locked --> unlocked
        print(msg2)
vipSecond = 0  # seconds elapsed since the VIP spawner started
vipCnt = 0     # number of VIP threads created so far
def vipCreate():
    # Spawner thread: creates one VIP customer every 10 seconds until the
    # main script clears the global `proEnd` flag, then joins all VIPs.
    vips = []
    global vipCnt
    global vipSecond
    global proEnd
    while proEnd:
        vipSecond += 1
        time.sleep(1)
        if vipSecond%10==0:
            print('[[[ VIP 등장! ]]]\n')
            vips.append(RestrictedAreaVIP())
            vips[vipCnt].start()
            vipCnt+=1
    for vip in vips:
        vip.join()
    print('%d 명의 [ VIP ] 상담 완료' % (vipCnt))
customs = []
proEnd = True  # cleared later to stop the VIP spawner thread
start_time = time.time()
for i in range(customCnt): # one thread per customer
    customs.append(RestrictedArea())
    print(customs[i].getName().replace("Thread","Custom")+" 번호표 뽑음")
th = threading.Thread(target=vipCreate)
th.start()
for cus in customs:
    cus.start() # start the customer threads
for cus in customs:
    cus.join() # wait for every customer to finish
    print(cus.getName().replace("Thread","Custom")+" 퇴장\n")
end_time = time.time()
proEnd = False  # signal the VIP spawner to stop
print('%d 명의 고객 상담 완료' % (i+1))
print('총 상담 처리 시간 : %lf초' % (end_time - start_time))
| [
"lih0420@naver.com"
] | lih0420@naver.com |
17382c82886a6f79476e82a3746c9219c595aa7c | e0045eec29aab56212c00f9293a21eb3b4b9fe53 | /product/models/res_partner.py | 9d36b9fcfddf842d91ea1defac36b635e4999208 | [] | no_license | tamam001/ALWAFI_P1 | a3a9268081b9befc668a5f51c29ce5119434cc21 | 402ea8687c607fbcb5ba762c2020ebc4ee98e705 | refs/heads/master | 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,024 | py | # -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
from odoo import fields, models, api
class Partner(models.Model):
    # Extends res.partner so that the pricelist behaves like a computed
    # field with a country-based fallback instead of a plain stored
    # company-dependent property.
    _name = 'res.partner'
    _inherit = 'res.partner'

    # NOT A REAL PROPERTY !!!!
    property_product_pricelist = fields.Many2one(
        'product.pricelist', 'Pricelist', compute='_compute_product_pricelist',
        inverse="_inverse_product_pricelist", company_dependent=False,
        help="This pricelist will be used, instead of the default one, for sales to the current partner")

    @api.multi
    @api.depends('country_id')
    def _compute_product_pricelist(self):
        # Resolve the effective pricelist per partner (honours the
        # force_company context key used during multi-company operations).
        company = self.env.context.get('force_company', False)
        res = self.env['product.pricelist']._get_partner_pricelist_multi(self.ids, company_id=company)
        for p in self:
            p.property_product_pricelist = res.get(p.id)

    @api.one
    def _inverse_product_pricelist(self):
        # Persist the chosen pricelist as an ir.property, using the
        # partner-country pricelist as the default fallback.
        pls = self.env['product.pricelist'].search(
            [('country_group_ids.country_ids.code', '=', self.country_id and self.country_id.code or False)],
            limit=1
        )
        default_for_country = pls and pls[0]
        actual = self.env['ir.property'].get('property_product_pricelist', 'res.partner', 'res.partner,%s' % self.id)
        # update at each change country, and so erase old pricelist
        if self.property_product_pricelist or (actual and default_for_country and default_for_country.id != actual.id):
            # keep the company of the current user before sudo
            self.env['ir.property'].with_context(force_company=self.env.user.company_id.id).sudo().set_multi(
                'property_product_pricelist',
                self._name,
                {self.id: self.property_product_pricelist or default_for_country.id},
                default_value=default_for_country.id
            )

    def _commercial_fields(self):
        # Propagate the pricelist to child contacts of the commercial entity.
        return super(Partner, self)._commercial_fields() + ['property_product_pricelist']
| [
"50145400+gilbertp7@users.noreply.github.com"
] | 50145400+gilbertp7@users.noreply.github.com |
cc9344aa970edeb891170348018e4f20b39b2bc2 | 1581bacbb7e7ed2f97aa1fb903ca0cf1a351be14 | /lib/framework/__init__.py | e7df70e79c346c3778f3fc2b43c63503b14059ef | [] | no_license | COOHU-Kr/SJVA3 | f5b7287aaa658287b003300e1973b63d2f6ac567 | ef68c085d980d0eb395da21f89cf999eeca8f980 | refs/heads/main | 2023-04-12T08:58:29.074902 | 2021-05-16T12:30:02 | 2021-05-16T12:30:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,488 | py | # -*- coding: utf-8 -*-
version = '0.2.20.10'
#########################################################
# python
import os
import sys
import platform
path_app_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
path_data = os.path.join(path_app_root, 'data')
flag_system_loading = False
from datetime import datetime, timedelta
import json
import traceback
# third-party
from flask import Flask, redirect, render_template, Response, request, jsonify, send_file, send_from_directory, abort, Markup
from flask_sqlalchemy import SQLAlchemy
from flask_socketio import SocketIO, emit
from flask_login import LoginManager, login_user, logout_user, current_user, login_required
#from celery import Celery
# sjva 공용
from .init_args import args
from .py_version_func import *
from framework.class_scheduler import Scheduler
from framework.logger import get_logger
from .menu import init_menu
from .user import User
from .init_web import jinja_initialize
from .init_etc import check_api, make_default_dir, pip_install, config_initialize
#########################################################
# App 시작
#########################################################
## 기본디렉토리 생성
make_default_dir(path_data)
package_name = __name__.split('.')[0]
logger = get_logger(package_name)
try:
# Global
logger.debug('Path app root : %s', path_app_root)
logger.debug('Path app data : %s', path_data)
logger.debug('Platform : %s', platform.system())
app = Flask('sjva')
#try:
# from flask_restful import Api
# api = Api(app)
#except:
# logger.debug('NOT INSTALLED FLASK_RESTFUL')
app.secret_key = os.urandom(24)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data/db/sjva.db?check_same_thread=False'
app.config['SQLALCHEMY_BINDS'] = {'sjva':'sqlite:///data/db/sjva.db'}
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['config'] = {}
config_initialize('start')
pip_install()
db = SQLAlchemy(app, session_options={"autoflush": False})
scheduler = Scheduler(args)
#socketio = SocketIO(app, cors_allowed_origins="*") #, async_mode='gevent')
if args is not None and args.use_gevent == False:
socketio = SocketIO(app, cors_allowed_origins="*", async_mode='threading')
else:
socketio = SocketIO(app, cors_allowed_origins="*") #, async_mode='gevent')
from flask_cors import CORS
CORS(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "login"
exit_code = -1
# app route가 되어 있는데 import 해야지만 routing이 됨
from .log_viewer import *
from .manual import *
# 추후 삭제
USERS = {"sjva"+version : User("sjva"+version, passwd_hash="sjva"+version),}
# System plugin import
##########################################
from .init_celery import celery
import framework.common.celery
##########################################
# 시스템 플러그인
# 시스템 DB부터 만들자.
import system
from system.model import ModelSetting as SystemModelSetting
# epg 없이 klive 만 있고 db 파일이 없을 때 아예 다른 모듈이 로딩안되는 문제 발생
# klive에서 epg 칼럼을 참조해서 그러는것 같음. 방어코드이나 확인못함
try:
db.create_all()
except Exception as exception:
logger.error('CRITICAL db.create_all()!!!')
logger.error('Exception:%s', exception)
logger.error(traceback.format_exc())
config_initialize('auth')
system.plugin_load()
flag_system_loading = True # 로그레벨에서 사용. 필요한가??
if app.config['config']['run_by_init_db']:
logger.debug('================================================')
logger.debug('Run by init db.. exit')
sys.exit()
app.register_blueprint(system.blueprint)
config_initialize('system_loading_after')
################################################################
# 아래는 코드 동작.. 위는 import만
plugin_menu = []
plugin_menu.append(system.menu)
plugin_instance_list = {}
jinja_initialize(app)
######################################################
# 플러그인
system.LogicPlugin.custom_plugin_update()
from .init_plugin import plugin_init
plugin_init()
logger.debug('### plugin loading completed')
#####################################################
# 메뉴
init_menu(plugin_menu)
system.SystemLogic.apply_menu_link()
logger.debug('### menu loading completed')
app.config['config']['port'] = 0
if sys.argv[0] == 'sjva.py' or sys.argv[0] == 'sjva3.py':
try:
app.config['config']['port'] = SystemModelSetting.get_int('port')
if app.config['config']['port'] == 19999 and app.config['config']['running_type'] == 'docker' and not os.path.exists('/usr/sbin/nginx'):
SystemModelSetting.set('port', '9999')
app.config['config']['port'] = 9999
except:
app.config['config']['port'] = 9999
if args is not None:
if args.port is not None:
app.config['config']['port'] = args.port
app.config['config']['repeat'] = args.repeat
app.config['config']['use_celery'] = args.use_celery
if platform.system() == 'Windows':
app.config['config']['use_celery'] = False
app.config['config']['use_gevent'] = args.use_gevent
logger.debug('### config ###')
logger.debug(json.dumps(app.config['config'], indent=4))
logger.debug('### LAST')
logger.debug('### PORT:%s', app.config['config']['port'])
logger.debug('### Now you can access SJVA by webbrowser!!')
except Exception as exception:
logger.error('Exception:%s', exception)
logger.error(traceback.format_exc())
# 반드시 마지막에
#import init_route
from .init_route import *
from .util import Util
try:
from tool_expand import TorrentProcess
TorrentProcess.server_process(None, category='None')
except:
pass
"""
try:
from lib_metadata import *
except:
pass
"""
| [
"cybersol@naver.com"
] | cybersol@naver.com |
c132e2ae4180e29a9a299525b1e4ec34899ea39a | d2eaacf8189655051d0d078e39a4d924df215b96 | /termtables/__about__.py | 699d5d53309c216bc4352884aae952bc2dc24cd2 | [
"MIT"
] | permissive | jayvdb/termtables | 4fdee11ec5b713c542c0d26aa00be0103db55787 | 5508afa6d813081355d95d80f5471f2d9ada738a | refs/heads/master | 2020-08-20T12:01:02.779004 | 2019-09-26T08:48:31 | 2019-09-26T08:48:31 | 216,020,488 | 0 | 0 | MIT | 2019-10-18T12:36:25 | 2019-10-18T12:36:22 | null | UTF-8 | Python | false | false | 267 | py | __author__ = "Nico Schlömer"
__email__ = "nico.schloemer@gmail.com"
__copyright__ = "Copyright (c) 2019 {} <{}>".format(__author__, __email__)
__license__ = "License :: OSI Approved :: MIT License"
__version__ = "0.1.0"
__status__ = "Development Status :: 4 - Beta"
| [
"nico.schloemer@gmail.com"
] | nico.schloemer@gmail.com |
6785637051cfb8ea05984b5fe150317fe94fb5fb | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /zRm6YDfQHoesdc3rb_23.py | f98bf044cde1ef882771ddac94ca7a7d1163f5aa | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | """
Let there be a square matrix, where each square is a rectangle, and a
combination of more squares are also rectangles. To find the number of
rectangles, Pete sat down and started counting... but that's highly
inefficient.
Create a function that takes the order of the matrix as input and returns the
number of rectangles in them.
### Examples
rectangles(1) ➞ 1
rectangles(2) ➞ 9
rectangles(3) ➞ 36
### Notes
* The input will always be an integer.
* Number of rectangles are given by: `((n(n+1))/2)^2`
* Watch the video listed in the **Resources** tab to get three different formulas.
"""
def rectangles(step):
    """Return the number of rectangles in a ``step`` x ``step`` square matrix.

    Uses the closed form (n(n+1)/2)^2.  Integer arithmetic (``//``) replaces
    the original true division, which returned a float (e.g. 1.0 where the
    documented examples expect the integer 1).
    """
    return (step * (step + 1) // 2) ** 2
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
3d72b403ae7bfb81b3d7bc29b76b7c477254a591 | d25a8b0e9f8a4f48504a49e094085f92d8d8e08a | /doc_db/db_utility.py | 7e8a6fd740a58b26dbba23e873fb50e127b99381 | [
"MIT"
] | permissive | William-Lake/doc_db | c7ad963bc0ff3a75b9a690bf44025e2aa6d1773b | 022b3e08d10d104fd838c7a094091e78d771ebe1 | refs/heads/master | 2020-03-31T01:34:59.815672 | 2019-01-30T18:36:39 | 2019-01-30T18:36:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | import logging
from peewee import *
from database import *
class DatabaseUtility(object):
    """Thin helper around the peewee ``Doc`` model."""

    def __init__(self):
        self.logger = logging.getLogger()
        self.logger.info('DatabaseUtility Loaded')

    def get_saved_doc_names(self):
        """Return the names of all stored documents.

        Bug fixed: the comprehension previously evaluated ``Doc.name`` (the
        model *field object*) instead of ``doc.name`` (the row value), so it
        returned N references to the field rather than the document names.
        """
        return [doc.name for doc in Doc.select(Doc.name)]

    def get_doc_by_name(self, name):
        """Return the query selecting documents whose name equals *name*."""
        return Doc.select().where(Doc.name == name)

    def save_docs(self, name_data_dict):
        """Persist one ``Doc`` row per (name, data) pair in *name_data_dict*."""
        for name, data in name_data_dict.items():
            Doc.create(name=name, data=data)
| [
"noreply"
] | noreply |
d4128909b323bf4c9ffdb7a50cb441e3b45941ec | 2eff7fdb6b4d61341c66e6afbf1ba63c67394d72 | /.history/codes_20201115144555.py | 079c1938cbf7e8425a8f965983bbb47a1a63132e | [] | no_license | E-STAT/speech_datacamp | 6b07390954b733d78768b24e18002579d744b58a | 8999629b0053e8662fc54ebb161f3a8a4f74d09d | refs/heads/master | 2023-01-21T06:13:40.909833 | 2020-11-23T12:32:42 | 2020-11-23T12:32:42 | 315,310,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | import wave
import numpy as np
import matplotlib.pyplot as plt
# Create audio file wave object
good_morning = wave.open('good_morning.wav', 'r')
# Read all frames from wave object
signal_gm = good_morning.readframes(-1)
# View first 10
print(signal_gm[:10])
########################Converting Soundwave byte to integers##########
# Convert good morning audio bytes to integers
soundwave_gm = np.frombuffer(signal_gm, dtype='int16')
# View the first 10 sound wave values
print(soundwave_gm[:10])
# Get the sound wave frame rate
framerate_gm = good_morning.getframerate()
# Find the sound wave timestamps
time_gm = np.linspace(start=0,
stop=len(soundwave_gm/framerate_gm), num=len(soundwave_gm))
# Print the first 10 timestamps
print(time_gm[:10])
#######plotting the wave
# Setup the title and axis titles
plt.title('Good Afternoon vs. Good Morning')
plt.ylabel('Amplitude')
plt.xlabel('Time (seconds)')
# Add the Good Afternoon data to the plot
plt.plot(time_ga, soundwave_ga, label='Good Afternoon')
# Add the Good Morning data to the plot
plt.plot(time_gm, soundwave_gm, label='Good Morning',
# Set the alpha variable to 0.5
alpha=0.5)
plt.legend()
plt.show()
| [
"owojori.tolulope@gmail.com"
] | owojori.tolulope@gmail.com |
c9e642a44b968079964309823b5b11beb0050205 | 905020fce75b4b63517ec31c601e721f5c260cd1 | /Тестирование is_prime().py | f87456f26c0167831c8087dac86c87e727c705f9 | [] | no_license | Dimaed90800/Python_Y | 7858ad46309281a89c5c1e83a0f09030996182a4 | 04092b854605cb05df439eeeb52003e585bb5a29 | refs/heads/main | 2023-01-24T04:11:17.858281 | 2020-11-17T20:42:45 | 2020-11-17T20:42:45 | 313,731,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | from yandex_testing_lesson import is_prime
# Ad-hoc smoke test for is_prime (imported above).  Bug fixed: the final
# line printed the literal string 'ans' instead of the computed verdict.
ans = ''
prime_nums = ['2', '3', '5', '7', '11', '13', '17', '19', '23', '29', '31',
              '83', '89', '97', '101', '103', '107', '109']
for i in prime_nums:
    # NOTE(review): `is_prime(i) in prime_nums` compares the *return value*
    # of is_prime against the list of strings; presumably the intent was to
    # test the boolean result directly -- confirm is_prime()'s contract.
    if is_prime(i) in prime_nums:
        ans = 'YES'
    else:
        ans = 'NO'
complicated = ['6', '9', '144', '1075', '6111']
for i in complicated:
    if is_prime(i) in complicated:
        ans = 'NO'
    else:
        ans = 'YES'
# NOTE(review): comparing against the string 'ValueError' only works if
# is_prime *returns* that string rather than raising -- verify.
if is_prime('0') != 'ValueError' or is_prime('1') != 'ValueError':
    ans = 'NO'
print(ans)
"noreply@github.com"
] | Dimaed90800.noreply@github.com |
9036422dcea82e711dfe2869f7bd5fd22ae042fc | 38fb82ff9f5ecee937cb950889335402aba2c7a6 | /route/migrations/0003_suggest_description.py | 18fdd3df086e6356dcde63674aefc2a8cd5563c2 | [] | no_license | squallcs12/vivu2017 | abe9c42cfd831de3411c1b986b6d5c4c4099808a | c6a3f37ee238464c9bf9de61a1c6e9f5be21f40f | refs/heads/master | 2021-01-11T22:36:21.289404 | 2017-02-02T08:11:19 | 2017-02-02T08:11:19 | 78,998,307 | 0 | 0 | null | 2017-02-02T08:11:20 | 2017-01-15T04:55:01 | Python | UTF-8 | Python | false | false | 486 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-24 05:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds a `description` TextField to
    # `route.Suggest`; the empty-string default is applied only while
    # migrating existing rows (preserve_default=False).

    dependencies = [
        ('route', '0002_auto_20170124_0431'),
    ]

    operations = [
        migrations.AddField(
            model_name='suggest',
            name='description',
            field=models.TextField(default=''),
            preserve_default=False,
        ),
    ]
| [
"daotranbang@gmail.com"
] | daotranbang@gmail.com |
06080b3fb0a23b3b4bc63e89f195003d3c5f50f8 | 9a9088713c917ac47c0b8713d6969b2cfcdbadac | /leetcode_python/829.Consecutive_Numbers_Sum.py | 2b564ef821aabab0906660d0a0f99482b9bb0ad7 | [] | no_license | zihuaweng/leetcode-solutions | 615fdcb9178b19764b4d30bcfe65a9f785e77270 | e431ff831ddd5f26891e6ee4506a20d7972b4f02 | refs/heads/master | 2023-02-06T03:58:26.413711 | 2020-12-26T05:23:03 | 2020-12-26T05:23:03 | 311,418,790 | 4 | 7 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | #!/usr/bin/env python3
# coding: utf-8
# Time complexity: O()
# Space complexity: O()
# https://leetcode.com/problems/consecutive-numbers-sum/
# https://leetcode.com/problems/consecutive-numbers-sum/discuss/129015/5-lines-C%2B%2B-solution-with-detailed-mathematical-explanation.
class Solution:
    def consecutiveNumbersSum(self, N: int) -> int:
        """Count the ways to write N as a sum of one or more consecutive
        positive integers.

        N = a + (a+1) + ... + (a+k-1) with a >= 1 exists exactly when
        N - k(k+1)/2 >= 0 and is divisible by k.

        Bugs fixed: the original loop stopped at sqrt(N), but valid run
        lengths k extend while k(k+1)/2 <= N (about sqrt(2N)) -- e.g.
        N = 15 returned 3 instead of 4, missing 1+2+3+4+5.  Float division
        was also replaced with exact integer arithmetic.
        """
        count = 1  # the single-term sum: N itself
        k = 2
        while k * (k + 1) // 2 <= N:
            if (N - k * (k + 1) // 2) % k == 0:
                count += 1
            k += 1
        return count
"zihuaw2@uci.edu"
] | zihuaw2@uci.edu |
4b3fe77e2e0a1432c7500cff79ec5504e9928a0f | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayOpenSearchboxDowngradePreconsultModel.py | 6939163275bc9a2554845ced061bb3ab165d749c | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 883 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenSearchboxDowngradePreconsultModel(object):
    """Request model for the searchbox downgrade pre-consult API; carries a
    single ``box_id`` field."""

    def __init__(self):
        self._box_id = None

    @property
    def box_id(self):
        return self._box_id

    @box_id.setter
    def box_id(self, value):
        self._box_id = value

    def to_alipay_dict(self):
        # Serialize to a plain dict; nested models serialize themselves via
        # their own to_alipay_dict().
        params = {}
        value = self.box_id
        if value:
            if hasattr(value, 'to_alipay_dict'):
                params['box_id'] = value.to_alipay_dict()
            else:
                params['box_id'] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        # Falsy payloads (None, empty dict) deserialize to None.
        if not d:
            return None
        model = AlipayOpenSearchboxDowngradePreconsultModel()
        if 'box_id' in d:
            model.box_id = d['box_id']
        return model
| [
"jishupei.jsp@alibaba-inc.com"
] | jishupei.jsp@alibaba-inc.com |
5170e8a9d1354e33d0bbb1a8e191b448f5397bdd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03167/s315916496.py | 8af060443edca9f35df49dadab2a2d70859c258b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | #dt = {} for i in x: dt[i] = dt.get(i,0)+1
import sys;input = sys.stdin.readline
inp,ip = lambda :int(input()),lambda :[int(w) for w in input().split()]
M = 10**9+7
h,w = ip()
grid = [input().strip() for i in range(h)]
dp = [[0]*w for i in range(h)]
dp[0][0] = 1
for i in range(h):
for j in range(w):
if i-1 >= 0 and grid[i-1][j] == '.':
dp[i][j] += dp[i-1][j]
if j-1 >= 0 and grid[i][j-1] == '.':
dp[i][j] += dp[i][j-1]
dp[i][j] %= M
print(dp[-1][-1]%M)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
568313b9e57d494a2f69cf8e3b087d4be843b081 | d0fe291905e1be001b3407c38e4d9702e94d7d42 | /ar/register_device.py | c5ee21483d7a3f300f479fe1124ef28a36308029 | [] | no_license | storvik/pythonremote | f508a4913fe653e51006a8456bbbf5b0aced9fd7 | a880a585a70c8a853b736ecb09f0712c96f2614b | refs/heads/master | 2021-01-01T06:33:18.628235 | 2015-09-21T04:46:34 | 2015-09-21T04:46:34 | 25,593,912 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,210 | py | import os
import string
import requests
import urllib
from .gcm import Gcm_req
from .color import color, green, red, yellow
from .load_device import load_device
from .load_computer import load_computer
from .unshorten_url import unshorten_url
# Register new device to autoremotedevices.txt
def register_device(config_path, host_name):
if os.path.isfile(config_path + 'autoremotedevices.txt'):
print(color(green,"Found registered devices. Continuing server startup.."))
else:
print(color(yellow,"Did not find any devices."))
answr = input(color(yellow,"You want to add a device? [y/n] "))
if answr in ['y','yes','Y','YES']:
register_newdevice(config_path, host_name)
else:
print(color(red,"autoremote is useless with no devices registered. Aborting..."))
exit(-1)
# Register new device
def register_newdevice(config_path, host_name):
fd = open(config_path + 'autoremotedevices.txt', 'a+') # Opening device file
# Todo: Check for existing name or key
name = input("Enter name for new device: ")
key = input("Enter personal key or characters after goo.gl/: ")
if len(key) > 5:
key_raw = unshorten_url('https://goo.gl/' + key)
if key_raw == key:
print(color(red,"Could not unshorten URL. Try with regular key if problem continues.."))
answr = input(color(yellow,"You want to try again? [y/n] "))
else:
key = key_raw.split("key=")[1]
register_sendtodevice(config_path, key)
fd.write(name+"\n"+key+"\n")
print(color(green,"Successfully added "+name+" to device list.."))
answr = input(color(yellow,"You want to add another device? [y/n] "))
else:
register_sendtodevice(config_path, key)
fd.write(name+"\n"+key+"\n")
print(color(green,"Successfully added "+name+" to device list.."))
answr = input(color(yellow,"You want to add another device? [y/n] "))
fd.close
if answr in ['y','yes','Y','YES']:
register_newdevice(config_path, host_name)
# Register computer on device
def register_sendtodevice(config_path, key):
computer = load_computer(config_path)
gcm = Gcm_req(key, computer["sender"], computer) # GCM register device message
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
r = requests.post("https://autoremotejoaomgcd.appspot.com/sendrequest", data=urllib.parse.urlencode(gcm.__dict__), headers=headers)
if r.text == "OK": # If message is sent
print(color(green,"Register device request successfully sent to device!"))
else:
print(color(red,"Couldn't send request. Aborting..."))
exit(-1)
def register_updatedevice(config_path):
if os.path.isfile('autoremotedevices.txt'):
devlist = load_device(config_path)
for i in range(1, len(devlist)-1, 2):
register_sendtodevice(config_path,devlist[i])
print(color(green,"Updated information on devices.."))
else:
print(color(yellow,"No 'autoremotedevices.txt', nothing done.."))
| [
"="
] | = |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.