blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a3b15afa593d694915db65262098bf26c3ff1509
|
5e5610e07441b320e4b6a088c0f6cc93334bba91
|
/transportation/management/commands/services.py
|
c2a831900e54fb88826d3c55805a063f27071ef5
|
[] |
no_license
|
pixmin/poimap
|
cdea21aeb753e358166474033dc68f9eac8e929f
|
b736a2bbf40467307aa2e12012347fb44be34cf9
|
refs/heads/master
| 2020-05-02T07:42:18.298709
| 2019-03-26T16:10:53
| 2019-03-26T16:10:53
| 177,825,011
| 0
| 0
| null | 2019-03-26T16:12:00
| 2019-03-26T16:12:00
| null |
UTF-8
|
Python
| false
| false
| 2,376
|
py
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.management.base import BaseCommand
from django.contrib.gis.geos import GEOSGeometry
from django.utils.text import slugify
from geopy.geocoders import GoogleV3
from poimap.models import POIType
from transportation.models import Line, Route, RouteStop, Stop, Service
import csv
class Command(BaseCommand):
    """Import or export transportation Service rows as pipe-delimited CSV.

    ``--import`` wipes all Service rows and reloads them from
    ``data/services.csv``; ``--export`` dumps every service of every route
    to ``data/export/services.csv``. The flags are mutually exclusive.

    NOTE(review): this command is written for Python 2 (print statements,
    ``str.decode`` on csv cells, csv written in ``'wb'`` mode); it will not
    run unmodified under Python 3.
    """
    def add_arguments(self, parser):
        # Two boolean flags; mutual exclusivity is enforced in handle().
        parser.add_argument('--import', dest='import', action="store_true")
        parser.add_argument('--export', dest='export', action="store_true")
    def handle(self, *args, **options):
        if options["import"] and options["export"]:
            print "Only once of these arguments at a time : --import or --export"
            return
        if options["import"]:
            # Full reload: drop every existing service before re-importing.
            Service.objects.all().delete()
            with open('data/services.csv') as csvfile:
                reader = csv.reader(csvfile, delimiter="|", quotechar='"')
                for row in reader:
                    # Expected columns: line | route | service | frequency label.
                    line_name, route_name, service_name, frequency_label = row
                    # Python 2 csv yields byte strings; decode before slugify.
                    line_name = line_name.decode('utf-8')
                    route_name = route_name.decode('utf-8')
                    service_name = service_name.decode('utf-8')
                    line_name_slug = slugify(line_name)
                    route_name_slug = slugify(route_name)
                    # Raises Route.DoesNotExist if the CSV names an unknown route.
                    route = Route.objects.get(slug=route_name_slug, line__slug=line_name_slug)
                    Service.objects.create(name=service_name, route=route, frequency_label=frequency_label)
        elif options["export"]:
            # Same pipe-delimited dialect as the import side.
            csv.register_dialect('troucelier', delimiter='|', quoting=csv.QUOTE_MINIMAL)
            with open('data/export/services.csv', 'wb') as f:
                writer = csv.writer(f, 'troucelier')
                for line in Line.objects.all():
                    for route in line.routes.all():
                        for service in route.services.all():
                            writer.writerow([line.name.encode('utf-8'),
                                             route.name.encode('utf-8'),
                                             service.name.encode('utf-8'),
                                             service.frequency_label.encode('utf-8')])
        else:
            print "Missing argument --import or --export"
            return
|
[
"alban.tiberghien@gmail.com"
] |
alban.tiberghien@gmail.com
|
bb1416137eb4f898b55ecbf227c26ea57e6b504b
|
55ab64b67d8abc02907eb43a54ff6c326ded6b72
|
/scripts/addon_library/local/weight_layers/layer_scripts/WLAYER_procedural_texture.py
|
3c74ed302830f717ee1cea767d61171ba2ba06b6
|
[
"MIT"
] |
permissive
|
Tilapiatsu/blender-custom_config
|
2f03b0bb234c3b098d2830732296d199c91147d0
|
00e14fc190ebff66cf50ff911f25cf5ad3529f8f
|
refs/heads/master
| 2023-08-16T14:26:39.990840
| 2023-08-16T01:32:41
| 2023-08-16T01:32:41
| 161,249,779
| 6
| 2
|
MIT
| 2023-04-12T05:33:59
| 2018-12-10T23:25:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,328
|
py
|
from .WL_layer_functions import CustomLayerSettingsBase
import bpy
class CustomLayerSettings(CustomLayerSettingsBase):
    """Settings UI for a procedural-texture weight layer."""
    def texture_enum_items(self, context):
        # Build (identifier, name, description) tuples for the EnumProperty
        # from every texture node in this layer's node group.
        items = []
        for node in self.layer.layer_group.nodes:
            if "TEX" in node.type:
                name = node.name
                items.append((node.label, name, name))
        # Assumes every texture node's label is a numeric string -- both the
        # sort here and the int() cast in texture_enum_update rely on it.
        # TODO confirm labels are set numerically at node-group creation.
        items.sort(key=lambda item: int(item[0]))
        return items
    def texture_enum_update(self, context):
        # Input 2 of the layer's node selects the active texture by index.
        self.node.inputs[2].default_value = int(self.texture_enum)
    # Dropdown of available texture nodes, keyed by their (numeric) labels.
    texture_enum: bpy.props.EnumProperty(items=texture_enum_items, update=texture_enum_update)
    def on_creation(self, context):
        # Push the initial enum selection into the node tree.
        self.texture_enum_update(context)
    def draw_layer(self, context, layout):
        self.draw_mix_settings(layout)
        layout.separator(factor=0.5)
        layout.prop(self, "texture_enum", text="", icon="TEXTURE_DATA")
        layout.separator(factor=0.5)
        nodes = {n.label: n for n in self.layer.layer_group.nodes}
        node = nodes[self.texture_enum]
        node.draw_buttons(context, layout)
        layout.separator(factor=0.5)
        layout = layout.column(align=True)
        self.draw_node_inputs(context, layout, node)
        # NOTE(review): called a second time without `node` -- different
        # arity from the call above; looks suspicious, confirm against the
        # base-class signature before removing.
        self.draw_node_inputs(context, layout)
        self.draw_adjustments_stack(context, layout)
|
[
"tilapiatsu@hotmail.fr"
] |
tilapiatsu@hotmail.fr
|
753b42ccddf3a2b6b07464b8a1830571ae36357c
|
9a72da59caf9d829cb3981a4b5e8bcde640732be
|
/releng_tool/engine/post.py
|
91998ef0745137254bcc8301649d44f51dd92fcd
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
releng-tool/releng-tool
|
0fa8c44b864ee10e7a8c8eeb54af7acc62b3cd56
|
d05eb2153c72e9bd82c5fdddd5eb41d5316592d6
|
refs/heads/main
| 2023-08-22T09:52:12.341285
| 2023-08-06T21:27:18
| 2023-08-06T21:27:18
| 155,482,664
| 12
| 2
|
BSD-2-Clause
| 2023-01-03T06:21:00
| 2018-10-31T01:54:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,775
|
py
|
# -*- coding: utf-8 -*-
# Copyright releng-tool
# SPDX-License-Identifier: BSD-2-Clause
from releng_tool.util.io import interim_working_dir
from releng_tool.util.io import opt_file
from releng_tool.util.io import run_script
from releng_tool.util.log import note
from releng_tool.util.log import verbose
import os
import sys
#: filename of the script to execute the post-processing operation (if any)
POST_SCRIPT = 'post'


def stage(engine, pkg, script_env):  # noqa: ARG001
    """
    handles the post-processing stage for a package

    Looks for an optional ``<pkg>-post`` script beside the package
    definition and, when one exists, executes it from the package's
    build directory.

    Args:
        engine: the engine
        pkg: the package being built
        script_env: script environment information

    Returns:
        ``True`` if the post-processing stage is completed; ``False`` otherwise
    """
    verbose('post-processing {} (pre-check)...', pkg.name)
    sys.stdout.flush()

    # resolve the candidate post-processing script; opt_file reports
    # whether any matching file actually exists
    script_base = os.path.join(pkg.def_dir, '{}-{}'.format(pkg.name, POST_SCRIPT))
    script_file, script_exists = opt_file(script_base)
    if not script_exists:
        # no script configured for this package -- nothing to do
        return True

    note('post-processing {}...', pkg.name)
    sys.stdout.flush()

    # run from the build sub-directory when one is configured
    work_dir = pkg.build_subdir if pkg.build_subdir else pkg.build_dir
    with interim_working_dir(work_dir):
        if not run_script(script_file, script_env, subject='post-processing'):
            return False

    verbose('post-processing script executed: ' + script_file)
    return True
|
[
"james.d.knight@live.com"
] |
james.d.knight@live.com
|
e73b0c2f931c70d88a494d81742b662b1f9f794a
|
d346c1e694e376c303f1b55808d90429a1ad3c3a
|
/easy/412.fizzBuzz.py
|
1d68ac4f80bf7dfcb5ab984bf7365445d85410af
|
[] |
no_license
|
littleliona/leetcode
|
3d06bc27c0ef59b863a2119cd5222dc94ed57b56
|
789d8d5c9cfd90b872be4a4c35a34a766d95f282
|
refs/heads/master
| 2021-01-19T11:52:11.938391
| 2018-02-19T03:01:47
| 2018-02-19T03:01:47
| 88,000,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
class Solution(object):
    def fizzBuzz(self, n):
        """Return the FizzBuzz sequence for 1..n as strings.

        Multiples of 3 become "Fizz", of 5 "Buzz", of both "FizzBuzz";
        everything else is the number itself as a string.

        :type n: int
        :rtype: List[str]

        Fix: the original had a second ``return`` (a one-liner variant)
        after the first one -- unreachable dead code, now removed.
        """
        out = []
        for i in range(1, n + 1):
            # check 15 first so multiples of both 3 and 5 win
            if i % 15 == 0:
                out.append("FizzBuzz")
            elif i % 3 == 0:
                out.append("Fizz")
            elif i % 5 == 0:
                out.append("Buzz")
            else:
                out.append(str(i))
        return out

s = Solution()
s.fizzBuzz(15)
|
[
"aria@Arias-MacBook-Pro.local"
] |
aria@Arias-MacBook-Pro.local
|
a74b1db5a96cc5517f157ef2a3dd75e49245b3eb
|
06e34e2dface0b87fa785cab7e65422a5f20ba18
|
/Solutions/165-Compare-Version-Numbers/python.py
|
048bbf21130c6b62a12402c9c0159f5b6c6472cd
|
[] |
no_license
|
JerryHu1994/LeetCode-Practice
|
c9841b0ce70451c19c8a429a3898c05b6233e1d4
|
b0ce69985c51a9a794397cd98a996fca0e91d7d1
|
refs/heads/master
| 2022-02-10T04:42:28.033364
| 2022-01-02T04:44:22
| 2022-01-02T04:44:22
| 117,118,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 726
|
py
|
class Solution(object):
    def compareVersion(self, version1, version2):
        """Compare two dotted version strings.

        Returns 1 if version1 > version2, -1 if smaller, 0 if equal
        (trailing zero components are ignored, so "1.0" == "1.0.0").

        :type version1: str
        :type version2: str
        :rtype: int
        """
        parts1 = [int(p) for p in version1.split(".")]
        parts2 = [int(p) for p in version2.split(".")]
        # compare the shared prefix component by component
        for a, b in zip(parts1, parts2):
            if a != b:
                return 1 if a > b else -1
        if len(parts1) == len(parts2):
            return 0
        # one version has extra trailing components; they only matter
        # if any of them is non-zero
        tail_sign = 1 if len(parts1) > len(parts2) else -1
        shorter = min(len(parts1), len(parts2))
        tail = parts1[shorter:] + parts2[shorter:]
        return tail_sign if any(tail) else 0
|
[
"hjr01211@gmail.com"
] |
hjr01211@gmail.com
|
9c4db63f3fccff9b34f02d52409ad971da14fb48
|
069a4ac8e931b77571f90fcc845b2c88ce18e069
|
/Chapter5/stack_queue_notes.py
|
4b2ebe26b0e772e5387d2cc623594c35e635490c
|
[
"Apache-2.0"
] |
permissive
|
qimanchen/Algorithm_Python
|
0f60c38e01119d7a99469f76194fdeb363008229
|
72eabb5fcc9fafb17172879c1250d3c9553e583d
|
refs/heads/master
| 2020-04-28T17:16:28.520904
| 2020-01-31T07:27:48
| 2020-01-31T07:27:48
| 175,440,054
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,233
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Stacks and queues:
    A container holds elements (other data structures).
    Only storing and retrieving items is supported; no relationships
    between the items themselves are maintained.
    The most important operations: inserting and removing elements.
    Two access orders:
        first-in first-out
        last-in first-out
Stack concept:
    Elements are related only by the time order in which they were added;
    there is no other relationship.
    last-in first-out
    Applications:
        prefix expressions: each operator's operands are the complete
        expressions that follow it
        postfix expressions: the opposite of the above
    Stacks and function calls:
        1. before entering a new function call, some state is saved -- the
           call's prologue
        2. when a call returns, the pre-call state must be restored -- the
           call's epilogue
        hence function calls have a cost
    Any recursively defined function can be translated into a
    non-recursive procedure by introducing a stack that stores
    intermediate results.
        recursion     -- involves function calls (consumes resources)
            conversion
        non-recursive -- reduces function-call overhead
    Any program containing loops can be translated into a loop-free
    recursive definition.
Queues:
    queue -- a container
    a singly linked list implements one directly -- first-in first-out
    (operating directly at the head)
    false overflow
    implementing a queue on a sequential list -- use a circular queue
    a simple implementation uses a fixed-size list
    data invariant: maintaining the correct relationships between an
    object's attributes
Stack-based search -- depth-first search   -- follow one path to its end
Queue-based search -- breadth-first search -- advance along many paths
Depth-first:
    always pushes forward along the current search path
    suited when the choice at each branch point matters a lot; for simple
    problems with no additional guiding information
    use when the state space is small
    solution:
        can be kept on the stack
Breadth-first:
    if any finite-length path to a solution exists -- it is guaranteed to
    find the shortest path (the nearest solution)
    solution:
        requires extra bookkeeping to record
    time cost -- the number of states visited
Special kinds of stacks and queues:
    1. double-ended queue --- Python's collections package defines a deque
       type -- the Python version of a double-ended queue
       linked structures bring flexibility but lose some efficiency
       the CPU prefers contiguous blocks for its cache hierarchy
"""
|
[
"1033178199@qq.com"
] |
1033178199@qq.com
|
aa9c859721f3cf61a743cb7f21d7af7caf49b2b0
|
cf2959812b89424dfc537d3df4b4e64f7b50cd60
|
/tests/test_issues.py
|
881b8fd4a50a188b5e17b383c2be12b212cd3ef5
|
[
"Apache-2.0"
] |
permissive
|
Pandziura/PyRFC
|
1c91808f6897d56743a5a7e66fb24e938c672960
|
0718eeb73d45732283d117f33e3395d35e4b2795
|
refs/heads/master
| 2020-03-23T04:54:20.141190
| 2018-07-13T13:20:25
| 2018-07-13T13:20:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,502
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import socket
import pyrfc
import pytest
from tests.config import PARAMS as params, CONFIG_SECTIONS as config_sections, get_error
def utf8len(s):
    """Return the number of bytes in the UTF-8 encoding of *s*."""
    encoded = s.encode('utf-8')
    return len(encoded)
class TestIssues():
    """Regression tests for specific PyRFC GitHub issues.

    Requires a live SAP backend; connection parameters come from
    tests.config.PARAMS.
    """
    def setup_method(self, test_method):
        """ A connection to an SAP backend system
        Instantiating an :class:`pyrfc.Connection` object will
        automatically attempt to open a connection the SAP backend.
        :param config: Configuration of the instance. Allowed keys are:
               ``dtime``
                 returns datetime types (accepts strings and datetimes), default is False
               ``rstrip``
                 right strips strings returned from RFC call (default is True)
               ``return_import_params``
                 importing parameters are returned by the RFC call (default is False)
        :type config: dict or None (default)
        """
        self.conn = pyrfc.Connection(**params)
        assert self.conn.alive
    def test_info(self):
        # sanity check: connection attributes are readable and language is EN
        connection_info = self.conn.get_connection_attributes()
        assert connection_info['isoLanguage'] == u'EN'
    def teardown_method(self, test_method):
        # close after every test and verify the connection really dropped
        self.conn.close()
        assert not self.conn.alive
    def test_issue31(self):
        """
        This test cases covers the issue 31
        """
        # Disabled (kept as a string): requires tests/data/issue31/rfcexec.exe
        # and the ZTEST_RAW_TABLE function module on the backend.
        '''
        filename = 'tests/data/issue31/rfcexec.exe'
        block = 1024
        with open(filename, 'rb') as file1:
            send = file1.read()
        send_content = [{'': bytearray(send[i:i+block])} for i in range(0, len(send), block)]
        result = self.conn.call('ZTEST_RAW_TABLE', TT_TBL1024=send_content)
        content = bytearray()
        for line in send_content:
            content += line['']
        assert send == content
        received_content = bytearray()
        for line in result['TT_TBL1024']:
            received_content += line['LINE']
        assert type(content) is bytearray
        assert type(content) == type(received_content)
        received_content = received_content[:len(content)]
        assert len(content) == len(received_content)
        assert content == received_content
        '''
    def test_issue38(self):
        # Round-trip assorted unicode strings (incl. astral-plane chars)
        # through /COE/RBP_FE_DATATYPES and compare what comes back.
        test = [
            'string',
            u'四周远处都能望见',
            u'\U0001F4AA',
            u'\u0001\uf4aa',
            u'a\xac\u1234\u20ac\U0001F4AA'
        ]
        for s in test:
            is_input = {'ZSHLP_MAT1': s, 'ZFLTP': 123.45}
            result = self.conn.call('/COE/RBP_FE_DATATYPES', IS_INPUT = is_input)['ES_OUTPUT']
            assert is_input['ZSHLP_MAT1'] == result['ZSHLP_MAT1']
    def test_issue40(self):
        # Disabled (kept as a string): exercises the function-description
        # cache against a specific system id ('S16').
        '''
        # put in cache
        result = self.conn.call('BAPI_USER_GET_DETAIL', USERNAME="DEMO")
        # get from cache
        fd = self.conn.func_desc_get_cached('S16', 'BAPI_USER_GET_DETAIL')
        assert fd.__class__ is pyrfc._pyrfc.FunctionDescription
        # remove from cache
        self.conn.func_desc_remove('S16', 'BAPI_USER_GET_DETAIL')
        try:
            fd = self.conn.func_desc_get_cached('S16', 'BAPI_USER_GET_DETAIL')
            assert fd.__class__ is not 'pyrfc._pyrfc.FunctionDescription'
        except pyrfc.RFCError as ex:
            error = get_error(ex)
            assert error['code'] == 17
            assert error['key'] == 'RFC_NOT_FOUND'
        '''
|
[
"srdjan.boskovic@sap.com"
] |
srdjan.boskovic@sap.com
|
e92416720e9d10d9c0f591929294d10b632b2e17
|
77b16dcd465b497c22cf3c096fa5c7d887d9b0c2
|
/Francisco_Trujillo/Assignments/flaskolympics/olympics6/server.py
|
29f43c7f8562278fb156e038802a32014712f696
|
[
"MIT"
] |
permissive
|
curest0x1021/Python-Django-Web
|
a7cf8a45e0b924ce23791c18f6a6fb3732c36322
|
6264bc4c90ef1432ba0902c76b567cf3caaae221
|
refs/heads/master
| 2020-04-26T17:14:20.277967
| 2016-10-18T21:54:39
| 2016-10-18T21:54:39
| 173,706,702
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
from flask import Flask, render_template, session, request, redirect
import random
app = Flask(__name__)
app.secret_key = 'my_secret_key'

# Home page: initialise the session counters before rendering.
@app.route('/')
def index():
    if not 'gold' in session:
        session['gold'] = 0
    if not 'activities' in session:
        session['activities'] = []
    return render_template('index.html')

# POST handler for the building actions.
@app.route('/process', methods = ['POST'])
def process():
    # gold payout ranges for each building
    buildings = {
        'farm':random.randint(5,10),
        'casino':random.randint(-50,50),
        'cave':random.randint(0,30),
        'house':random.randint(0,5)
    }
    # NOTE(review): `buildings` is computed but never applied to the
    # session before redirecting -- intentional per the question in the
    # string literal below; confirm before "fixing".
    return redirect('/')

if __name__ == '__main__':
    app.run(debug = True)

"""
Will this work?
at first we would import the random from function to work, but nothing would happen to session data.
"""
|
[
"43941751+curest0x1021@users.noreply.github.com"
] |
43941751+curest0x1021@users.noreply.github.com
|
c6c5b98d66abced8eee513b103a30429094cface
|
6879a8596df6f302c63966a2d27f6b4d11cc9b29
|
/abc/problems030/028/c.py
|
33699ae14e6f2767c00684d9ab5c770609c9fcae
|
[] |
no_license
|
wkwkgg/atcoder
|
41b1e02b88bf7a8291b709306e54cb56cb93e52a
|
28a7d4084a4100236510c05a88e50aa0403ac7cd
|
refs/heads/master
| 2020-07-26T03:47:19.460049
| 2020-03-01T18:29:57
| 2020-03-01T18:29:57
| 208,523,188
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
# ABC028 : C - pick-three-numbers man (数を3つ選ぶマン)
# Read whitespace-separated integers from stdin, form the sum of every
# 3-element combination, and print the third-largest of those sums.
from itertools import combinations
numbers = list(map(int, input().split()))
triple_sums = [sum(combo) for combo in combinations(numbers, 3)]
print(sorted(triple_sums, reverse=True)[2])
|
[
"yujin@komachi.live"
] |
yujin@komachi.live
|
022c0a5b59b6ab927a9a5f4463dd7ea34fc79202
|
3624e9f0a026b57ebdafa4e842b93f56e5a8504d
|
/Codeforces/54 Beta Division 2/Problem A/A.py
|
e71f611141ffce1faa0d2fef75b77557ad7af796
|
[
"MIT"
] |
permissive
|
ailyanlu1/Competitive-Programming-2
|
54109c8644d3ac02715dc4570916b212412c25c0
|
6c990656178fb0cd33354cbe5508164207012f24
|
refs/heads/master
| 2020-03-23T07:48:20.560283
| 2018-02-15T06:49:49
| 2018-02-15T06:49:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
# Prints "YES" when the letters 'h','e','l','l','o' appear in order in the
# input (i.e. "hello" is a subsequence), otherwise "NO".
# NOTE(review): Python 2 only (raw_input / print statement).
s = raw_input()
l = len(s)  # NOTE(review): unused
# Greedily consume one target letter at a time, keeping only the text
# after each match; any missing letter falls through to a "NO" branch.
if 'h' in s:
    i = s.index('h')
    s = s[i+1:]
    if 'e' in s:
        i = s.index('e')
        s = s[i+1:]
        if 'l' in s:
            i = s.index('l')
            s = s[i+1:]
            if 'l' in s:
                i = s.index('l')
                s = s[i+1:]
                if 'o' in s:
                    print "YES"
                else:
                    print "NO"
            else:
                print "NO"
        else:
            print "NO"
    else:
        print "NO"
else:
    print "NO"
|
[
"adityapaliwal95@gmail.com"
] |
adityapaliwal95@gmail.com
|
a03cd0b74173e423e1504dfeef024bbbf613678d
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/eventhub/v20150801/event_hub.py
|
c58558ab4ee10df9503de6c7df040f6b0f34cf5d
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 7,517
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['EventHub']
class EventHub(pulumi.CustomResource):
    # Auto-generated by the Pulumi SDK Generator (see file header); keep
    # hand edits to documentation only.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 event_hub_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 message_retention_in_days: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 namespace_name: Optional[pulumi.Input[str]] = None,
                 partition_count: Optional[pulumi.Input[int]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Single item in List or Get Event Hub operation

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] event_hub_name: The Event Hub name
        :param pulumi.Input[str] location: Location of the resource.
        :param pulumi.Input[int] message_retention_in_days: Number of days to retain the events for this Event Hub.
        :param pulumi.Input[str] name: Name of the Event Hub.
        :param pulumi.Input[str] namespace_name: The Namespace name
        :param pulumi.Input[int] partition_count: Number of partitions created for the Event Hub.
        :param pulumi.Input[str] resource_group_name: Name of the resource group within the azure subscription.
        :param pulumi.Input[str] status: Enumerates the possible values for the status of the Event Hub.
        :param pulumi.Input[str] type: ARM type of the Namespace.
        """
        # legacy keyword aliases, kept for backwards compatibility
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # creating a new resource: validate the required inputs
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            if event_hub_name is None:
                raise TypeError("Missing required property 'event_hub_name'")
            __props__['event_hub_name'] = event_hub_name
            if location is None:
                raise TypeError("Missing required property 'location'")
            __props__['location'] = location
            __props__['message_retention_in_days'] = message_retention_in_days
            __props__['name'] = name
            if namespace_name is None:
                raise TypeError("Missing required property 'namespace_name'")
            __props__['namespace_name'] = namespace_name
            __props__['partition_count'] = partition_count
            if resource_group_name is None:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['status'] = status
            __props__['type'] = type
            # output-only properties start unset
            __props__['created_at'] = None
            __props__['partition_ids'] = None
            __props__['updated_at'] = None
        # register aliases so resources created under other API versions
        # of this type are treated as the same resource
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:eventhub/latest:EventHub"), pulumi.Alias(type_="azure-nextgen:eventhub/v20140901:EventHub"), pulumi.Alias(type_="azure-nextgen:eventhub/v20170401:EventHub")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(EventHub, __self__).__init__(
            'azure-nextgen:eventhub/v20150801:EventHub',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'EventHub':
        """
        Get an existing EventHub resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # no input properties are passed; presumably the engine rehydrates
        # state from the provider via the id -- TODO confirm
        __props__ = dict()
        return EventHub(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> pulumi.Output[str]:
        """
        Exact time the Event Hub was created.
        """
        return pulumi.get(self, "created_at")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        Resource location
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter(name="messageRetentionInDays")
    def message_retention_in_days(self) -> pulumi.Output[Optional[int]]:
        """
        Number of days to retain the events for this Event Hub.
        """
        return pulumi.get(self, "message_retention_in_days")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="partitionCount")
    def partition_count(self) -> pulumi.Output[Optional[int]]:
        """
        Number of partitions created for the Event Hub.
        """
        return pulumi.get(self, "partition_count")
    @property
    @pulumi.getter(name="partitionIds")
    def partition_ids(self) -> pulumi.Output[Sequence[str]]:
        """
        Current number of shards on the Event Hub.
        """
        return pulumi.get(self, "partition_ids")
    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[Optional[str]]:
        """
        Enumerates the possible values for the status of the Event Hub.
        """
        return pulumi.get(self, "status")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="updatedAt")
    def updated_at(self) -> pulumi.Output[str]:
        """
        The exact time the message was updated.
        """
        return pulumi.get(self, "updated_at")
    def translate_output_property(self, prop):
        # provider camelCase -> Python snake_case
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Python snake_case -> provider camelCase
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
5666f7847d1dd2aede0d1d0572c957bc26bf50ed
|
14d940630ab365be939fc08d3d95b0a98789bae7
|
/lab103_robo_testes.py
|
53c976a5f967be537db278f7a9d6ac845b034565
|
[] |
no_license
|
accolombini/python_completo
|
1da6f58f0c57b978d70582d96dc12b80c2d5b8a8
|
935102173a1112273b09734392dca08d76e9c749
|
refs/heads/master
| 2023-01-09T07:51:15.494101
| 2020-10-11T23:39:08
| 2020-10-11T23:39:08
| 283,790,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,319
|
py
|
"""
# Motivando -> observe os teste antes de serem rafatorados -> note a repetição dos códigos
class RoboTestes(unittest.TestCase):
def test_carregar(self):
megaman = Robo('Mega Man', bateria=50)
megaman.carregar()
self.assertEqual(megaman.bateria, 100)
def test_dizer_nome(self):
megaman = Robo('Mega Man', bateria=50)
self.assertEqual(megaman.dizer_nome(), 'BEEP BOOP BEEP BOOP. Eu sou MEGA MAN')
self.assertEqual(megaman.bateria, 49, 'A bateria deveria estar em 49%')
"""
import unittest
from lab103_robo import Robo
# Refatorando utilizando setUp() e tearDown(). Note que o serUp() cria um objeto e o deixa
# disponível para todos os métodos
class RoboTestes(unittest.TestCase):
    """Unit tests for Robo, refactored to share the fixture via setUp/tearDown."""
    def setUp(self):
        # Runs before every test: each test gets a fresh Robo at 50% battery.
        self.megaman = Robo('Mega Man', bateria=50)
        print(f'setUp() sendo executado ...')
    def test_carregar(self):
        # Charging must bring the battery up to 100%.
        self.megaman.carregar()
        self.assertEqual(self.megaman.bateria, 100)
    def test_dizer_nome(self):
        # Saying the name is expected to cost 1% battery (50 -> 49).
        self.assertEqual(self.megaman.dizer_nome(), 'BEEP BOOP BEEP BOOP. Eu sou MEGA MAN')
        self.assertEqual(self.megaman.bateria, 49, 'A bateria deveria estar em 49%')
    def tearDown(self):
        # Runs after every test.
        print(f'tearDown() sendo executado ...')
if __name__ == '__main__':
    unittest.main()
|
[
"accolombini@gmail.com"
] |
accolombini@gmail.com
|
56bae0ea261cac580770b5cc789b04b6b2ad0c17
|
a275cec1fddb6e034b4e9df72f8039536c009990
|
/codes/leetcode/merge-sorted-array.py
|
0ddbcf9899003e159cb71c435e5f14e50ece09e7
|
[] |
no_license
|
taoste/dirtysalt
|
a3cbd16710c81de65f00aa919f4e67a1fc66d226
|
bd68294fb7727d598ea1c8bf0a559247e07c1aea
|
refs/heads/master
| 2021-08-24T10:44:47.607924
| 2017-12-09T08:47:12
| 2017-12-09T08:47:12
| 113,807,519
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
#!/usr/bin/env python
# coding:utf-8
# Copyright (C) dirlt
class Solution(object):
    def merge(self, nums1, m, nums2, n):
        """Merge sorted nums2 into sorted nums1 in place.

        nums1 has length m + n: its first m entries hold valid values and
        the remaining n slots are fill space for nums2.

        :type nums1: List[int]
        :type m: int
        :type nums2: List[int]
        :type n: int
        :rtype: void Do not return anything, modify nums1 in-place instead.
        """
        src1 = m - 1
        src2 = n - 1
        # Fill nums1 from the back so no unread entry gets overwritten;
        # once src1 and src2 are both exhausted, dst is exhausted too.
        for dst in range(m + n - 1, -1, -1):
            if src2 < 0 or (src1 >= 0 and nums1[src1] > nums2[src2]):
                nums1[dst] = nums1[src1]
                src1 -= 1
            else:
                nums1[dst] = nums2[src2]
                src2 -= 1
|
[
"dirtysalt1987@gmail.com"
] |
dirtysalt1987@gmail.com
|
d7223e33cf1a53d89ca0729366e2b0ddfc6f1740
|
d2e82d8bc2a4604b6e734f7521ddae2716486b96
|
/20190521/futureData_model4/record/draw_record.py
|
bf0bc758b03291cdde27003399b6195b2623e921
|
[] |
no_license
|
JudyPhy/spider
|
af74dbf8b74b335b64247b382e73b669796e5c1a
|
eb32aab272269f13a97ecea17eb6135f9e7e3d49
|
refs/heads/master
| 2021-07-05T19:07:50.427310
| 2020-08-14T09:29:49
| 2020-08-14T09:29:49
| 159,917,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,312
|
py
|
from common import common
def __getDrawRecords(draw, history_raceResults_rows):
    """Tally historical finishing positions for one barrier draw.

    Args:
        draw: barrier draw number to collect statistics for.
        history_raceResults_rows: mapping race_date_No -> {horse_code: row},
            where each row carries at least 'plc' and 'draw' keys.

    Returns:
        list: [No1, No2, No3, No4, All] counts for the draw.

    Fix: the original used ``dict`` as a loop variable, shadowing the
    builtin; it also iterated ``.items()`` while ignoring the keys.
    """
    draw_records = [0, 0, 0, 0, 0]  # [No1, No2, No3, No4, All]
    for horse_rows in history_raceResults_rows.values():
        for row in horse_rows.values():
            # 'DH' marks a dead-heat; strip it so the placing parses as int
            plc = row['plc'].replace('DH', '')
            # common.words presumably lists the non-finish placings to skip
            # -- TODO confirm against common.common
            if plc in common.words or int(row['draw']) != draw:
                continue
            draw_records[4] += 1
            place = int(plc)
            if 1 <= place <= 4:
                draw_records[place - 1] += 1
    return draw_records
def GetDrawRecord(future_raceCard_rows, history_raceResults_rows):
    """Build per-draw historical records for every draw on the race card.

    Args:
        future_raceCard_rows: mapping race_date_No -> {horse_No: row}; each
            row must carry a 'draw' key.
        history_raceResults_rows: historical results, passed through to
            __getDrawRecords.

    Returns:
        dict: draw number -> [No1, No2, No3, No4, All] record list.

    Fix: the original used ``dict`` as a loop variable, shadowing the
    builtin; it also iterated ``.items()`` while ignoring the keys.
    """
    # collect the distinct draws in first-seen order
    draw_list = []
    for horse_rows in future_raceCard_rows.values():
        for row in horse_rows.values():
            draw = int(row['draw'])
            if draw not in draw_list:
                draw_list.append(draw)
    return {draw: __getDrawRecords(draw, history_raceResults_rows)
            for draw in draw_list}
|
[
"pujie@chinatsp.com"
] |
pujie@chinatsp.com
|
97c069ca064fb33b7d4719f5d2073453efe1716a
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_296/ch6_2019_04_22_18_58_02_325401.py
|
499aadc8a44219fa30fdbadc9ec1e86a5ba174aa
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
def encontra_maximo(a):
    """Return the largest element of ``a``, or 0 when the list is empty.

    Bug fix: the original kept the last element that was merely >= its
    predecessor (and compared a[0] against a[-1] on the first pass), so
    e.g. [3, 1, 2] wrongly returned 2 instead of 3.
    """
    if not a:
        # preserve the original behaviour: empty input yields 0 (printed below)
        return 0
    return max(a)

lista = []
print(encontra_maximo(lista))
|
[
"you@example.com"
] |
you@example.com
|
0805d0d3bab7db888f1100aaa875e33667988239
|
1f7fce552cc68731f683ded3f831e8f4650c7197
|
/Axis16/main/migrations/0009_kartavyaregistration.py
|
1be3f60f6b5585457a1db0143d9da9c6a1c78c22
|
[] |
no_license
|
tanaypatil/axis-website
|
3985068cf1c52bb038b7174cbdf938b8b4084c03
|
b5eda2906150a38b1bb0daf8b23c9194572b849c
|
refs/heads/master
| 2020-06-13T03:14:05.855948
| 2019-06-30T13:12:11
| 2019-06-30T13:12:11
| 194,514,303
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,426
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-15 08:24
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration (see header); prefer a follow-up migration
    # over hand edits.
    dependencies = [
        ('main', '0008_ornithoregistration'),
    ]
    operations = [
        migrations.CreateModel(
            name='KartavyaRegistration',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('idnum', models.CharField(default=None, max_length=20)),
                ('team', models.CharField(default=None, max_length=20, unique=True)),
                # f* fields are required -- presumably the first team member
                ('fname', models.CharField(max_length=40)),
                ('fcollege', models.CharField(max_length=60)),
                ('fmail', models.EmailField(default=None, max_length=254, unique=True)),
                ('fcon', models.CharField(default=None, max_length=12, unique=True, validators=[django.core.validators.RegexValidator('^[0-9]+$', 'Enter a valid phone number.')])),
                ('fcity', models.CharField(max_length=12, null=True)),
                # s* fields are blank/null -- presumably an optional second member
                ('sname', models.CharField(blank=True, default=None, max_length=40, null=True)),
                ('scollege', models.CharField(blank=True, default=None, max_length=60, null=True)),
                ('smail', models.EmailField(blank=True, default=None, max_length=254, null=True)),
                ('scon', models.CharField(blank=True, default=None, max_length=12, null=True, validators=[django.core.validators.RegexValidator('^[0-9]+$', 'Enter a valid phone number.')])),
                ('scity', models.CharField(blank=True, default=None, max_length=12, null=True)),
                # t* fields are blank/null -- presumably an optional third member
                ('tname', models.CharField(blank=True, default=None, max_length=40, null=True)),
                ('tcollege', models.CharField(blank=True, default=None, max_length=60, null=True)),
                ('tmail', models.EmailField(blank=True, default=None, max_length=254, null=True)),
                ('tcon', models.CharField(blank=True, default=None, max_length=12, null=True, validators=[django.core.validators.RegexValidator('^[0-9]+$', 'Enter a valid phone number.')])),
                ('tcity', models.CharField(blank=True, default=None, max_length=12, null=True)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
|
[
"tanaypatil197@gmail.com"
] |
tanaypatil197@gmail.com
|
6b785ccf6de401b6de6be7b80935b38f0153b522
|
8a6cf531ed72310d7114237407302ef075171937
|
/ores/ores.py
|
8b81ab5f852c87b9c7043f4eebd231d7dbbbe645
|
[
"MIT"
] |
permissive
|
ureesoriano/ores
|
64a7f3c8a8917fe33449302c55cff23952a5719c
|
dda9db6c8737d12acbae5b0d43938d93c9e7ea8e
|
refs/heads/master
| 2020-03-17T21:54:12.610518
| 2018-05-20T08:36:13
| 2018-05-20T08:36:13
| 133,980,352
| 0
| 0
|
MIT
| 2018-05-18T16:43:18
| 2018-05-18T16:43:18
| null |
UTF-8
|
Python
| false
| false
| 1,552
|
py
|
"""
This script provides access to a set of utilities for ORES
* precached -- Starts a daemon that requests scores for revisions as they happen
* score_revisions -- Scores a set of revisions using an ORES API
* stress_test -- Scores a large set of revisions at a configurable rate
* test_api -- Runs a series of tests against a live ORES API
You can also launch a set of production like applications
* applications.wsgi -- A wsgi server
* applications.celery -- A celery worker
{usage}
Options:
-h | --help Shows this documentation
<utility> The name of the utility to run
"""
import sys
import traceback
from importlib import import_module
USAGE = """Usage:
{progname} (-h | --help)
{progname} <utility> [-h | --help]
""".format(progname=sys.argv[0])
def main():
if len(sys.argv) < 2:
sys.stderr.write(USAGE)
sys.exit(1)
elif sys.argv[1] in ("-h", "--help"):
sys.stderr.write(__doc__.format(usage=USAGE))
sys.exit(1)
elif sys.argv[1][:1] == "-":
sys.stderr.write(USAGE)
sys.exit(1)
module_name = sys.argv[1]
if module_name.find("application") == 0:
module_path = "." + module_name
else:
module_path = ".utilities." + module_name
try:
sys.path.insert(0, ".")
module = import_module(module_path, package="ores")
except ImportError:
sys.stderr.write(traceback.format_exc())
sys.stderr.write("Could not find module {0}.\n".format(module_path))
sys.exit(1)
module.main(sys.argv[2:])
|
[
"aaron.halfaker@gmail.com"
] |
aaron.halfaker@gmail.com
|
f3b095aab2099cf1a4956081a8357cf4160ac645
|
9ac16f3a952475715756cd4985e9355c6c0059b6
|
/docker/app/app/backend/apps/_archive/accounts_new/profiles/models.py
|
ccc1478f984975ead9d71f229d8be54404e828e1
|
[
"BSD-3-Clause",
"ISC"
] |
permissive
|
JTarball/docker-django-polymer-starter-kit
|
14a9900bb1f4402ffffaf8a428fd600d2430d35c
|
b5250030b1646e29567c15d01ba4668c6ad535c9
|
refs/heads/master
| 2021-01-19T22:14:36.890793
| 2015-11-18T18:49:14
| 2015-11-18T18:49:14
| 46,353,679
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,218
|
py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import AbstractUser
class AccountsUser(AbstractUser):
    """Custom user model: Django's AbstractUser plus profile extras.

    Adds an account-activation key and a newsletter-subscription flag on
    top of the standard ``AbstractUser`` fields.
    """
    # Name of the field on the User used as the unique identifier.
    USERNAME_FIELD = 'username'
    # Random key emailed to the user to confirm account activation.
    activation_key = models.CharField(_('activation key'), max_length=40)
    # Extra Profile Fields
    # Whether the user receives the newsletter.
    # NOTE(review): the help_text wording ("can is subscribed") is garbled,
    # but it is a runtime/user-visible string, so it is left untouched here.
    is_subscribed = models.BooleanField(_('subscribed'), default=False, help_text=_('Designates whether the user can is subscribed to the newsletter.'))
    ###########################################################################
    # Note Django User has the following fields so dont Duplicate!
    ###########################################################################
    # id
    # username
    # first_name
    # last_name
    # email
    # password
    # is_staff
    # is_active
    # is_superuser
    # last_login
    # date_joined
    ###########################################################################
    # future
    #bio = models.TextField()
    #failed_login_attempts = models.PositiveIntegerField(default=0, editable=False)
    #last_login_attempt_ip = models.CharField(default='', max_length=45, editable=False)
|
[
"james.tarball@gmail.com"
] |
james.tarball@gmail.com
|
aeee10979dd5fd6b4e0388c3fdfc64f99aa5f61d
|
9d4ed31ebe11eb3fa40b7ab809e40762446708e0
|
/Python diye Programming sekha 2nd/Tracking mails.py
|
2fe4654d874fd2edd55cfbcbc3d191bb4e519754
|
[
"MIT"
] |
permissive
|
mitul3737/My-Python-Programming-Journey-from-Beginning-to-Data-Sciene-Machine-Learning-AI-Deep-Learning
|
299dc5abbfc98ea66cda94b2b3de31ac32ab2b3c
|
ca2c15c597a64e5a7689ba3a44ce36a1c0828194
|
refs/heads/main
| 2023-04-17T20:49:03.252440
| 2021-05-16T19:07:16
| 2021-05-16T19:07:16
| 360,768,274
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
text="Email us for any feedback here: shahriyarmitul3737@gmail.com py.book@subeen.com book_py@subeen.com thank you"
import re
print(re.findall(r'[.\w]+@\w+[.]\w+',text))
|
[
"shahriyarmitul3737@gmail.com"
] |
shahriyarmitul3737@gmail.com
|
df45bd3b7577e0039d27c223860742502e62e05f
|
4ea832d725d820b0c3796a87cdb9f763a8b657cd
|
/MyTensorFlow/utils.py
|
8658fe74d9d950be224ba88447c47f03c2a9b5a1
|
[] |
no_license
|
Ollitros/DataScienceProject
|
07cc6b9577ae63eb3aede152e46d4dd5a07f8a09
|
b14e6add0c929a0820647e8d085e0c1e131d573e
|
refs/heads/master
| 2022-02-28T01:09:43.441967
| 2019-10-05T14:51:47
| 2019-10-05T14:51:47
| 120,162,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,778
|
py
|
import numpy
def scale_to_unit_interval(ndar, eps=1e-8):
    """Return a copy of *ndar* linearly rescaled into the interval [0, 1].

    The minimum maps to 0 and the maximum to (almost) 1; ``eps`` guards
    against division by zero when the array is constant.
    """
    rescaled = ndar.copy()
    rescaled -= rescaled.min()
    rescaled *= 1.0 / (rescaled.max() + eps)
    return rescaled
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
                       scale_rows_to_unit_interval=True,
                       output_pixel_vals=True):
    """
    Transform an array with one flattened image per row, into an array in
    which images are reshaped and layed out like tiles on a floor.
    This function is useful for visualizing datasets whose rows are images,
    and also columns of matrices for transforming those rows
    (such as the first layer of a neural net).
    :type X: a 2-D ndarray or a tuple of 4 channels, elements of which can
    be 2-D ndarrays or None;
    :param X: a 2-D array in which every row is a flattened image.
    :type img_shape: tuple; (height, width)
    :param img_shape: the original shape of each image
    :type tile_shape: tuple; (rows, cols)
    :param tile_shape: the number of images to tile (rows, cols)
    :param output_pixel_vals: if output should be pixel values (i.e. int8
    values) or floats
    :param scale_rows_to_unit_interval: if the values need to be scaled before
    being plotted to [0,1] or not
    :returns: array suitable for viewing as an image.
    (See:`Image.fromarray`.)
    :rtype: a 2-d array with same dtype as X.
    """
    assert len(img_shape) == 2
    assert len(tile_shape) == 2
    assert len(tile_spacing) == 2
    # The expression below can be re-written in a more C style as
    # follows :
    #
    # out_shape = [0,0]
    # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
    #                tile_spacing[0]
    # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
    #                tile_spacing[1]
    out_shape = [
        (ishp + tsp) * tshp - tsp
        for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)
    ]
    if isinstance(X, tuple):
        # Tuple input is interpreted as 4 separate channels (RGBA), each of
        # which may itself be a 2-D array of flattened images or None.
        assert len(X) == 4
        # Create an output numpy ndarray to store the image
        if output_pixel_vals:
            out_array = numpy.zeros((out_shape[0], out_shape[1], 4),
                                    dtype='uint8')
        else:
            out_array = numpy.zeros((out_shape[0], out_shape[1], 4),
                                    dtype=X.dtype)
        # colors default to 0, alpha defaults to 1 (opaque)
        if output_pixel_vals:
            channel_defaults = [0, 0, 0, 255]
        else:
            channel_defaults = [0., 0., 0., 1.]
        for i in range(4):
            if X[i] is None:
                # if channel is None, fill it with zeros of the correct
                # dtype
                dt = out_array.dtype
                if output_pixel_vals:
                    dt = 'uint8'
                out_array[:, :, i] = numpy.zeros(
                    out_shape,
                    dtype=dt
                ) + channel_defaults[i]
            else:
                # use a recurrent call to compute the channel and store it
                # in the output
                out_array[:, :, i] = tile_raster_images(
                    X[i], img_shape, tile_shape, tile_spacing,
                    scale_rows_to_unit_interval, output_pixel_vals)
        return out_array
    else:
        # if we are dealing with only one channel
        H, W = img_shape
        Hs, Ws = tile_spacing
        # generate a matrix to store the output
        dt = X.dtype
        if output_pixel_vals:
            dt = 'uint8'
        out_array = numpy.zeros(out_shape, dtype=dt)
        for tile_row in range(tile_shape[0]):
            for tile_col in range(tile_shape[1]):
                # Skip tile positions beyond the number of available rows,
                # leaving them as the zero background.
                if tile_row * tile_shape[1] + tile_col < X.shape[0]:
                    this_x = X[tile_row * tile_shape[1] + tile_col]
                    if scale_rows_to_unit_interval:
                        # if we should scale values to be between 0 and 1
                        # do this by calling the `scale_to_unit_interval`
                        # function
                        this_img = scale_to_unit_interval(
                            this_x.reshape(img_shape))
                    else:
                        this_img = this_x.reshape(img_shape)
                    # add the slice to the corresponding position in the
                    # output array; scale [0,1] floats up to [0,255] when
                    # emitting pixel values
                    c = 1
                    if output_pixel_vals:
                        c = 255
                    out_array[
                        tile_row * (H + Hs): tile_row * (H + Hs) + H,
                        tile_col * (W + Ws): tile_col * (W + Ws) + W
                    ] = this_img * c
        return out_array
|
[
"Ollitros@gmail.com"
] |
Ollitros@gmail.com
|
68be9becb1f195d995195e96a0a50ba5b6a58bce
|
2a788f6e0db36ea2565e6b4b161827e31cc968b7
|
/test/test_convert.py
|
c00c417416589d4931e0e7b63a2f89583b2675de
|
[] |
no_license
|
djairdutra/scan-pdf
|
354626f334d471e5fe4a42fff630ac676f76a325
|
0d2d96a3bbb0b97f01b93c1a290b296e85c21d37
|
refs/heads/master
| 2020-05-28T03:03:36.338043
| 2018-11-07T22:32:43
| 2018-11-07T22:32:43
| 188,862,507
| 0
| 1
| null | 2019-05-27T14:52:05
| 2019-05-27T14:52:04
| null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
import unittest
import mock
import sys
from assertpy import assert_that
class Options(object):
    """Bare attribute bag standing in for the parsed CLI options object."""
    pass
class ConverterTest(unittest.TestCase):
    """Unit tests for scan_pdf.Converter's shell-out to ImageMagick."""

    @classmethod
    def setUpClass(cls):
        # Replace the real subprocess module before scan_pdf imports it, so
        # no external 'convert' binary is ever executed by the tests.
        sys.modules['subprocess'] = mock.Mock()

    def test_convert(self):
        """convert() should build the expected ImageMagick command line."""
        from scan_pdf import Converter
        options = Options()
        options.color_mode = 'bw'
        options.resolution = 300
        converter = Converter(options)
        result = converter.convert('base', '.suffix')
        import subprocess
        # 'bw' maps to 1-bit depth; resolution feeds -density.
        subprocess.call.assert_called_with(['convert', '-depth', '1', '-density', '300', '-compress', 'zip', 'base.suffix', 'base.pdf'])
        assert_that(result).is_equal_to(subprocess.call.return_value)
|
[
"andi@tryb.de"
] |
andi@tryb.de
|
9efef8515a5846ac87f18087a9c5a37d21d94e2b
|
2bdad552a0739f39b647678938a3c79b2fdde5fe
|
/src/old_code/mini_models/mini_models.py
|
54bf9a31a1503ac7b23d77181cafa9f45d4bc69a
|
[
"MIT"
] |
permissive
|
embeddedsamurai/single_shot_multibox_detector
|
7b59e8c0a5cadaff650896764edfef18b38f344d
|
2807da10b6e994ae72c1f287b0dfbf2f3f9116f9
|
refs/heads/master
| 2021-01-20T01:38:50.331431
| 2017-03-29T18:12:26
| 2017-03-29T18:12:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,876
|
py
|
import keras.backend as K
from keras.applications import VGG16
from keras.layers import Activation
from keras.layers import Convolution2D
from keras.layers import Dropout
from keras.layers import Lambda
from keras.layers import MaxPooling2D
from keras.layers import merge
from keras.layers import Reshape
from keras.models import Model
from layers import PriorBox2 as PriorBox
def mini_SSD(num_classes=21):
base_kernel_size = 4 + num_classes
aspect_ratios = (1, 2, 1/2)
num_aspect_ratios = len(aspect_ratios)
base_model = VGG16(weights='imagenet')
base_model.layers[0].name = 'input_1'
input_tensor = base_model.input
#input_tensor = base_model
#input_tensor.name = 'image_array'
for layer in base_model.layers:
layer.trainable = False
body = base_model.get_layer('block4_pool').output
body = Convolution2D((base_kernel_size * num_aspect_ratios), 3, 3,
border_mode='same')(body)
branch_1 = PriorBox(aspect_ratios)(body)
body = Convolution2D(32, 3, 3, border_mode='same')(branch_1)
body = Activation('relu')(body)
body = MaxPooling2D((2, 2))(body)
body = Dropout(.5)(body)
body = Convolution2D((base_kernel_size * num_aspect_ratios), 3, 3,
border_mode='same')(body)
branch_2 = PriorBox(aspect_ratios)(body)
body = Convolution2D(64, 3, 3, border_mode='same')(branch_2)
body = Activation('relu')(body)
body = MaxPooling2D((3, 3))(body)
body = Dropout(.5)(body)
body = Convolution2D((base_kernel_size * num_aspect_ratios), 3, 3,
border_mode='same')(body)
branch_3 = PriorBox(aspect_ratios)(body)
branch_1 = Reshape((-1, 4 + num_classes))(branch_1)
local_1 = Lambda(lambda x: x[:, :, :4])(branch_1)
class_1 = Lambda(lambda x: K.softmax(x[:, :, 4:]))(branch_1)
branch_2 = Reshape((-1, 4 + num_classes))(branch_2)
local_2 = Lambda(lambda x: x[:, :, :4])(branch_2)
class_2 = Lambda(lambda x: K.softmax(x[:, :, 4:]))(branch_2)
branch_3 = Reshape((-1, 4 + num_classes))(branch_3)
local_3 = Lambda(lambda x: x[:, :, :4])(branch_3)
class_3 = Lambda(lambda x: K.softmax(x[:, :, 4:]))(branch_3)
classification_tensor = merge([class_1, class_2, class_3], mode='concat',
concat_axis=1, name='classes')
localization_tensor = merge([local_1, local_2, local_3], mode='concat',
concat_axis=1, name='encoded_box')
output_tensor = merge([localization_tensor, classification_tensor],
mode='concat', concat_axis=-1, name='predictions')
model = Model(input_tensor, output_tensor)
return model
"""
if __name__ == '__main__':
model = mini_SSD()
model.summary()
from keras.utils.visualize_util import plot
plot(model, 'my_SSD.png')
"""
|
[
"arriaga.camargo@gmail.com"
] |
arriaga.camargo@gmail.com
|
dd3ee097cfafe78022793a50500d8412420c9b94
|
7c71776030428f86eb72d58580c263ade993cd70
|
/tests/Test_Memory_Shaper.py
|
06e3c8410e0affef7eb61bf68b04eae4483cfce3
|
[] |
no_license
|
p-christ/Action-Grammar-Reinforcement-Learning
|
0ae511039e0e86f3144644cf4b6c44249adbcb40
|
cddc88995b9f5717e81c72b94f5d03177b8c4468
|
refs/heads/master
| 2022-03-07T16:33:27.582929
| 2019-10-31T11:58:38
| 2019-10-31T11:58:38
| 218,323,156
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,710
|
py
|
from utilities.Memory_Shaper import Memory_Shaper
import numpy as np
# Shared fixture parameters for the Memory_Shaper tests below.
buffer_size = 10  # default replay-buffer capacity
batch_size = 5  # number of experiences sampled per batch
seed = 1  # RNG seed so sampling order is deterministic
def new_reward_fn(cumulative_reward, length_of_macro_action):
    """Reward shaping that favours longer macro actions.

    The bonus grows with the square root of (length - 1) and scales with
    the magnitude of the original cumulative reward; a small fixed base of
    0.1 is used when that reward is exactly zero (so a zero reward still
    earns a bonus for longer macros).
    """
    base = abs(cumulative_reward) if cumulative_reward != 0.0 else 0.1
    bonus = base * ((length_of_macro_action - 1) ** 0.5) * 0.1
    return cumulative_reward + bonus
def test_calculate_max_action_length():
    """Tests that calculate_max_action_length works correctly"""
    memory_shaper = Memory_Shaper(buffer_size, batch_size, seed, new_reward_fn=new_reward_fn)
    # Rule keys are tuples of primitive actions; the longest below has 7.
    action_rules = {(0, 2, 33, 1, 22, 0, 0): 99, (0, 4): 2, (0, 9): 100}
    assert memory_shaper.calculate_max_action_length(action_rules) == 7
    # Longest key here has 4 primitive actions.
    action_rules = {(0, 2, 3): 99, (0, 4, 0, 0): 2, (0, 9): 100}
    assert memory_shaper.calculate_max_action_length(action_rules) == 4
def test_add_adapted_experience_for_an_episode():
    """Tests that add_adapted_experience_for_an_episode works correctly"""
    buffer_size = 3
    memory_shaper = Memory_Shaper(buffer_size, buffer_size, seed,
                                  new_reward_fn=new_reward_fn,
                                  action_balanced_replay_buffer=False)
    memory_shaper.reset()
    # One 2-step episode: action 0 then action 5.
    states = [0, 1]
    next_states = [1, 10]
    rewards = [10, 5]
    actions = [0, 5]
    dones = [False, True]
    memory_shaper.add_episode_experience(states, next_states, rewards, actions, dones)
    # Rule 6 is the macro action (0, 5); the rest map primitives to themselves.
    action_rules = {6: (0, 5), 1: (1,), 2: (2,), 3: (3,), 4: (4,), 5: (5,), 0: (0,)}
    replay_buffer = memory_shaper.put_adapted_experiences_in_a_replay_buffer(action_rules)
    # 2 primitive transitions + 1 macro transition.
    assert len(replay_buffer) == 3
    s_states, s_actions, s_rewards, s_next_states, s_dones = replay_buffer.sample(separate_out_data_types=True)
    assert all(s_states.numpy() == np.array([[0.0], [0.0], [1.0]]))
    assert all(s_actions.numpy() == np.array([[0.0], [6.0], [5.0, ]]))
    # The macro transition's reward is the shaped sum 10 + 5 = 15.
    assert all(s_rewards.numpy() == np.array([[10.0], [new_reward_fn(15.0, 2)], [5.0, ]]))
    assert all(s_next_states.numpy() == np.array([[1.0], [10.0], [10.0, ]]))
    assert all(s_dones.numpy() == np.array([[0.0], [1.0], [1.0]]))
    # Second scenario: 3-step episode with both a 2-step and a 3-step macro.
    buffer_size = 5
    memory_shaper = Memory_Shaper(buffer_size, buffer_size, seed, new_reward_fn=new_reward_fn,
                                  action_balanced_replay_buffer=False)
    memory_shaper.reset()
    states = [0, 1, 2]
    next_states = [1, 10, 11]
    rewards = [10, 5, -4]
    actions = [0, 5, 2]
    dones = [False, False, True]
    memory_shaper.add_episode_experience(states, next_states, rewards, actions, dones)
    action_rules = {6: (0, 5), 7: (0, 5, 2), 1: (1,), 2: (2,), 3: (3,), 4: (4,), 5: (5,), 0: (0,)}
    replay_buffer = memory_shaper.put_adapted_experiences_in_a_replay_buffer(action_rules)
    # 3 primitive transitions + macros (0,5) and (0,5,2).
    assert len(replay_buffer) == 5
    s_states, s_actions, s_rewards, s_next_states, s_dones = replay_buffer.sample(separate_out_data_types=True)
    assert all(s_states.numpy() == np.array([[1.0], [0.0], [0.0], [2.0], [0.0]]))
    assert all(s_actions.numpy() == np.array([[5.0], [0.0], [7.0], [2.0], [6.0]]))
    assert np.allclose(s_rewards.numpy(), np.array([[5.0], [10.0], [np.round(new_reward_fn(11.0, 3), 5)], [-4.0], [new_reward_fn(15.0, 2)]]))
    assert all(s_next_states.numpy() == np.array([[10.0], [1.0], [11.0], [11.0], [10.0]]))
    assert all(s_dones.numpy() == np.array([[0.0], [0.0], [1.0], [1.0], [0.0]]))
def test_add_adapted_experience_for_an_episode_long_action_length():
    """Tests that add_adapted_experience_for_an_episode works correctly for actions with length > 2"""
    buffer_size = 4
    memory_shaper = Memory_Shaper(buffer_size, buffer_size, seed, new_reward_fn=new_reward_fn)
    states = [0, 1, 2]
    next_states = [1, 10, 11]
    rewards = [10, 5, 2]
    actions = [0, 1, 2]
    dones = [False, False, False]
    memory_shaper.add_episode_experience(states, next_states, rewards, actions, dones)
    # Rule 3 is a 3-step macro covering the whole episode.
    action_rules = {3: (0, 1, 2), 0: (0,), 1: (1,), 2: (2, )}
    replay_buffer = memory_shaper.put_adapted_experiences_in_a_replay_buffer(action_rules)
    # Default (action-balanced) buffer: one memory partition per action id,
    # each holding exactly one transition here.
    assert len(replay_buffer.memories[0]) == 1
    assert len(replay_buffer.memories[1]) == 1
    assert len(replay_buffer.memories[2]) == 1
    assert len(replay_buffer.memories[3]) == 1
    s_states, s_actions, s_rewards, s_next_states, s_dones = replay_buffer.sample(separate_out_data_types=True)
    assert all(s_states.numpy() == np.array([[0.0], [1.0, ], [2.0], [0.0]]))
    assert all(s_actions.numpy() == np.array([[0.0], [1.0, ], [2.0], [3.0]]))
    # Macro reward is the shaped sum 10 + 5 + 2 = 17 over length 3.
    assert np.allclose(s_rewards.numpy(), np.array([[10.0], [5.0], [2.0], [new_reward_fn(17.0, 3)]]))
    assert all(s_next_states.numpy() == np.array([[1.0], [10.0, ], [11.0], [11.0]]))
    assert all(s_dones.numpy() == np.array([[0.0], [0.0], [0.0], [0.0]]))
def test_add_adapted_experience_for_multiple_episodes():
    """Tests that add_adapted_experience_for_an_episode works correctly for multiple episodes"""
    # for reward_increment in [0.0, 0.5, 1.5]:
    buffer_size = 6
    memory_shaper = Memory_Shaper(buffer_size, 6, seed, new_reward_fn)
    # Episode 1: single step taking action 0.
    states = [0]
    next_states = [1]
    rewards = [10]
    actions = [0]
    dones = [False]
    memory_shaper.add_episode_experience(states, next_states, rewards, actions, dones)
    # Episode 2: single step taking action 1.
    states = [1]
    next_states = [2]
    rewards = [11]
    actions = [1]
    dones = [True]
    memory_shaper.add_episode_experience(states, next_states, rewards, actions, dones)
    # Episode 3: two steps, actions 0 then 1 (matches macro rule 2 below).
    states = [1, 2]
    next_states = [2, 3]
    rewards = [11, 2]
    actions = [0, 1]
    dones = [False, True]
    memory_shaper.add_episode_experience(states, next_states, rewards, actions, dones)
    action_rules = {0: (0,), 1: (1,), 2: (0, 1)}
    replay_buffer = memory_shaper.put_adapted_experiences_in_a_replay_buffer(action_rules)
    # Two primitive-0 transitions, two primitive-1, one macro (0, 1).
    assert len(replay_buffer.memories[0]) == 2
    assert len(replay_buffer.memories[1]) == 2
    assert len(replay_buffer.memories[2]) == 1
    s_states, s_actions, s_rewards, s_next_states, s_dones = replay_buffer.sample(separate_out_data_types=True)
    assert all(s_states.numpy() == np.array([[0.0], [1.0], [2.0], [1.0], [1.0], [1.0]]))
    assert all(s_actions.numpy() == np.array([[0.0], [0.0], [1.], [1.], [2.], [2.]]))
    # Macro reward is the shaped sum 11 + 2 = 13 over length 2.
    assert np.allclose(s_rewards.numpy(), np.array([[10.0], [11.0], [2.0], [11.0], [new_reward_fn(13.0, 2)], [new_reward_fn(13.0, 2)]]))
    assert all(s_next_states.numpy() == np.array([[1.0], [2.0], [3.0], [2.0], [3.0], [3.0]]))
    assert all(s_dones.numpy() == np.array([[0.0], [0.0], [1.0], [1.], [1.], [1.]]))
|
[
"p.christodoulou2@gmail.com"
] |
p.christodoulou2@gmail.com
|
07cb99e1a52cc6e587019a5e17a328e0cf94ac78
|
f5d17f536bd8617ac3d56c7d5aca4002444b481d
|
/requests3/toolbelt/auth/http_proxy_digest.py
|
38e9013f3884cd95f06fbe355609f4c4d98fa14e
|
[
"Apache-2.0"
] |
permissive
|
cclauss/requests3
|
a2df822642b4ecbbf9147b1df5e804e4b3d7ca35
|
260cd50aec1bd52b58968c3dfd0d5e850d563ac2
|
refs/heads/master
| 2020-05-15T18:47:10.559821
| 2019-04-20T17:30:15
| 2019-04-20T17:30:15
| 182,370,659
| 0
| 0
|
NOASSERTION
| 2019-04-20T06:41:56
| 2019-04-20T06:41:55
| null |
UTF-8
|
Python
| false
| false
| 3,668
|
py
|
# -*- coding: utf-8 -*-
"""The module containing HTTPProxyDigestAuth."""
import re
from requests import cookies, utils
from . import _digest_auth_compat as auth
class HTTPProxyDigestAuth(auth.HTTPDigestAuth):
    """HTTP digest authentication between proxy

    :param stale_rejects: The number of rejects indicate that:
        the client may wish to simply retry the request
        with a new encrypted response, without reprompting the user for a
        new username and password. i.e., retry build_digest_header
    :type stale_rejects: int
    """
    # Matches the leading "Digest " scheme token (case-insensitive) in the
    # Proxy-Authenticate header value.
    _pat = re.compile(r"digest ", flags=re.IGNORECASE)

    def __init__(self, *args, **kwargs):
        super(HTTPProxyDigestAuth, self).__init__(*args, **kwargs)
        self.stale_rejects = 0
        self.init_per_thread_state()

    @property
    def stale_rejects(self):
        """Count of stale rejections seen; stored thread-locally when the
        requests version supports per-thread auth state."""
        thread_local = getattr(self, "_thread_local", None)
        if thread_local is None:
            return self._stale_rejects
        return thread_local.stale_rejects

    @stale_rejects.setter
    def stale_rejects(self, value):
        thread_local = getattr(self, "_thread_local", None)
        if thread_local is None:
            # Pre-2.8.0 requests: plain instance attribute.
            self._stale_rejects = value
        else:
            thread_local.stale_rejects = value

    def init_per_thread_state(self):
        """Initialise per-thread digest state when the base class supports it."""
        try:
            super(HTTPProxyDigestAuth, self).init_per_thread_state()
        except AttributeError:
            # If we're not on requests 2.8.0+ this method does not exist
            pass

    def handle_407(self, r, **kwargs):
        """Handle HTTP 407 only once, otherwise give up

        :param r: current response
        :returns: responses, along with the new response
        """
        if r.status_code == 407 and self.stale_rejects < 2:
            s_auth = r.headers.get("proxy-authenticate")
            if s_auth is None:
                raise IOError(
                    "proxy server violated RFC 7235:"
                    "407 response MUST contain header proxy-authenticate"
                )
            elif not self._pat.match(s_auth):
                # Not a Digest challenge -- nothing this handler can do.
                return r

            # Parse the challenge parameters (realm, nonce, stale, ...).
            self.chal = utils.parse_dict_header(self._pat.sub("", s_auth, count=1))

            # if we present the user/passwd and still get rejected
            # https://tools.ietf.org/html/rfc2617#section-3.2.1
            if "Proxy-Authorization" in r.request.headers and "stale" in self.chal:
                if self.chal["stale"].lower() == "true":  # try again
                    self.stale_rejects += 1
                # wrong user/passwd
                elif self.chal["stale"].lower() == "false":
                    raise IOError("User or password is invalid")

            # Consume content and release the original connection
            # to allow our new request to reuse the same one.
            r.content
            r.close()
            prep = r.request.copy()
            cookies.extract_cookies_to_jar(prep._cookies, r.request, r.raw)
            prep.prepare_cookies(prep._cookies)

            # Re-send the request with a freshly computed digest header.
            prep.headers["Proxy-Authorization"] = self.build_digest_header(
                prep.method, prep.url
            )
            _r = r.connection.send(prep, **kwargs)
            _r.history.append(r)
            _r.request = prep
            return _r
        else:  # give up authenticate
            return r

    def __call__(self, r):
        """Attach proxy digest auth to an outgoing prepared request."""
        self.init_per_thread_state()
        # if we have nonce, then just use it, otherwise server will tell us
        if self.last_nonce:
            r.headers["Proxy-Authorization"] = self.build_digest_header(r.method, r.url)
        r.register_hook("response", self.handle_407)
        return r
|
[
"me@kennethreitz.org"
] |
me@kennethreitz.org
|
c7050f5f25178f18d977879c234baea3c726f0ca
|
d88397be1c6a31985bc2283280e743fd3b988dd1
|
/nncf/structures.py
|
b29eca4968e93b8bf41744073ad75940d2d2a247
|
[
"Apache-2.0"
] |
permissive
|
sshyran/openvino-nncf-pytorch
|
f5e09066a216fa786927937a91a0e6742f347660
|
fd02652950cd803a36f5283f5a5df999bb45433b
|
refs/heads/develop
| 2023-04-18T06:58:54.646669
| 2021-03-12T15:41:39
| 2021-03-12T15:41:39
| 347,374,166
| 0
| 0
|
Apache-2.0
| 2023-04-03T23:52:21
| 2021-03-13T13:11:32
| null |
UTF-8
|
Python
| false
| false
| 6,486
|
py
|
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Callable, Any
import torch
from torch.nn.modules.loss import _Loss
from torch.utils.data import DataLoader
from nncf.config.structure import NNCFExtraConfigStruct
class QuantizationPrecisionInitArgs(NNCFExtraConfigStruct):
    """
    Stores arguments for initialization of quantization's bitwidth.
    Initialization is based on calculating a measure reflecting layers' sensitivity to perturbations. The measure is
    calculated by estimation of average trace of Hessian for modules using the Hutchinson algorithm.

    :param criterion_fn: callable object, that implements calculation of loss by given outputs of the model, targets,
        and loss function. It's not needed when the calculation of loss is just a direct call of the criterion with 2
        arguments: outputs of model and targets. For all other specific cases, the callable object should be provided.
        E.g. for inception-v3, the losses for two outputs of the model are combined with different weight.
    :param criterion: loss function, instance of descendant of `torch.nn.modules.loss._Loss`,
    :param data_loader: 'data_loader' - provides an iterable over the given dataset. Instance of
        nncf.initialization.InitializingDataLoader; a regular 'torch.utils.data.DataLoader' may
        also be passed, but only in the simple case when it returns a tuple of (input, target) tensors.
        *WARNING*: The final quantizer setup of the created compressed model is dependent on the data
        provided by the data_loader. When using PyTorch's DistributedDataParallel with precision
        initialization, make sure that each process in the distributed group receives the same data
        from the data_loader as the other processes, otherwise the create_compressed_model call may
        create different compressed model objects for each distributed process and the distributed training
        will fail.
    :param device: Device to perform initialization at. Either 'cpu', 'cuda', or None (default); if None, will
        use the device of the model's parameters.
    """

    def __init__(self, criterion_fn: Callable[[Any, Any, _Loss], torch.Tensor], criterion: _Loss,
                 data_loader: DataLoader, device: str = None):
        # Plain data holder: attributes are read directly by the precision
        # initialization algorithm.
        self.criterion_fn = criterion_fn
        self.criterion = criterion
        self.data_loader = data_loader
        self.device = device

    @classmethod
    def get_id(cls) -> str:
        # Key under which this struct is registered in the NNCF config.
        return "quantization_precision_init_args"
class QuantizationRangeInitArgs(NNCFExtraConfigStruct):
    """
    Stores arguments for initialization of quantization's ranges.
    Initialization is done by collecting per-layer activation statistics on training dataset in order to choose proper
    output range for quantization.

    :param data_loader: 'data_loader' - provides an iterable over the given dataset. Instance of
        nncf.initialization.InitializingDataLoader; a regular 'torch.utils.data.DataLoader' may
        also be passed, but only in the simple case when it returns a tuple of (input, target) tensors.
    :param device: Device to perform initialization at. Either 'cpu', 'cuda', or None (default); if None, will
        use the device of the model's parameters.
    """

    def __init__(self, data_loader: DataLoader, device: str = None):
        # Iterable of batches used to collect activation statistics.
        self.data_loader = data_loader
        # Target device name; None defers to the model parameters' device.
        self.device = device

    @classmethod
    def get_id(cls) -> str:
        # Key under which this struct is registered in the NNCF config.
        return "quantization_range_init_args"
class BNAdaptationInitArgs(NNCFExtraConfigStruct):
    """
    Stores arguments for BatchNorm statistics adaptation procedure.
    Adaptation is done by inferring a number of data batches on a compressed model
    while the BN layers are updating the rolling_mean and rolling_variance stats.

    :param data_loader: 'data_loader' - provides an iterable over the given dataset. Instance of
        nncf.initialization.InitializingDataLoader; a regular 'torch.utils.data.DataLoader' may
        also be passed, but only in the simple case when it returns a tuple of (input, target) tensors.
    :param device: Device to perform initialization at. Either 'cpu', 'cuda', or None (default); if None, will
        use the device of the model's parameters.
    """

    def __init__(self, data_loader: DataLoader, device: str = None):
        # Batches inferred through the model so BN running stats settle.
        self.data_loader = data_loader
        # Target device name; None defers to the model parameters' device.
        self.device = device

    @classmethod
    def get_id(cls) -> str:
        # Key under which this struct is registered in the NNCF config.
        return "bn_adaptation_init_args"
class AutoQPrecisionInitArgs(NNCFExtraConfigStruct):
    """
    Stores arguments for AutoQ (RL-driven) mixed-precision initialization.

    :param data_loader: 'data_loader' - provides an iterable over the given dataset. Instance of
        nncf.initialization.InitializingDataLoader; a regular 'torch.utils.data.DataLoader' may
        also be passed, but only in the simple case when it returns a tuple of (input, target) tensors.
        *WARNING*: The final quantizer setup of the created compressed model is dependent on the data
        provided by the data_loader. When using PyTorch's DistributedDataParallel with precision
        initialization, make sure that each process in the distributed group receives the same data
        from the data_loader as the other processes, otherwise the create_compressed_model call may
        create different compressed model objects for each distributed process and the distributed training
        will fail.
    :param eval_fn: callable that evaluates a model on a data loader and returns a scalar quality metric.
    :param nncf_config: the NNCFConfig the initializer reads its settings from.
    """

    def __init__(self, data_loader: DataLoader,
                 eval_fn: Callable[[torch.nn.Module, torch.utils.data.DataLoader], float],
                 nncf_config: 'NNCFConfig'):
        self.data_loader = data_loader
        # Scalar-metric evaluation callback driving the precision search.
        self.eval_fn = eval_fn
        # Note: stored under the shorter name `config`, not `nncf_config`.
        self.config = nncf_config

    @classmethod
    def get_id(cls) -> str:
        # Key under which this struct is registered in the NNCF config.
        return "autoq_precision_init_args"
|
[
"noreply@github.com"
] |
sshyran.noreply@github.com
|
862b20b09be678debb5763ce7aed391cdd305028
|
375f29655b966e7dbac2297b3f79aadb5d03b737
|
/Image/test6.py
|
e0c2f30ebc95da86684e30c0aa49fd81708a4285
|
[
"MIT"
] |
permissive
|
pection-zz/FindJointwithImageprocessing
|
33e0b47ca3629d85e739edcd88dcd1663af88631
|
3dd4563be88dfcf005c32f19ae97d03f9bf715ad
|
refs/heads/master
| 2022-12-23T11:09:04.391591
| 2020-10-05T16:35:21
| 2020-10-05T16:35:21
| 301,473,183
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,062
|
py
|
import numpy as np
import cv2
# mouse callback function
def draw_circle(event, x, y, flags, param):
    """Mouse callback: blend the `human` image into `tiger` centred on a left-click.

    After the click, a key pressed within one second selects the pixel-wise
    NumPy operation ('1'..'7') used to combine the two image patches.

    :param event: OpenCV mouse event code
    :param x: click x-coordinate in window space
    :param y: click y-coordinate in window space
    :param flags: unused OpenCV callback extra
    :param param: unused OpenCV callback extra
    """
    global human, tiger, logo
    positionX = tiger.shape[1]  # canvas width
    positionY = tiger.shape[0]  # canvas height
    col = human.shape[1]        # overlay width
    row = human.shape[0]        # overlay height
    if event == cv2.EVENT_LBUTTONDOWN:  # left click
        # BUG FIX: use floor division. On Python 3, `row/2` yields a float,
        # which makes the slice indices below raise a TypeError.
        y -= row // 2
        x -= col // 2  # centre the overlay on the click point
        # Clamp the overlay rectangle so it stays fully inside the canvas.
        if y + row > positionY:
            y = positionY - row
        elif y < 0:
            y = 0
        if x + col > positionX:
            x = positionX - col
        elif x < 0:
            x = 0
        # Patch of the tiger image that the overlay will be blended into.
        logo = tiger[y:y + row, x:x + col]
        k = cv2.waitKey(1000) & 0xFF  # wait up to 1s for an operation key
        # Keys '1'-'7' choose the pixel-wise blending operation.
        if k == ord('1'):
            logo = np.add(logo, human[0:row, 0:col])
        if k == ord('2'):
            logo = np.subtract(logo, human[0:row, 0:col])
        if k == ord('3'):
            logo = np.multiply(logo, human[0:row, 0:col])
        if k == ord('4'):
            logo = np.divide(logo, human[0:row, 0:col])
        if k == ord('5'):
            logo = np.bitwise_and(logo, human[0:row, 0:col])
        if k == ord('6'):
            logo = np.bitwise_or(logo, human[0:row, 0:col])
        if k == ord('7'):
            logo = np.bitwise_xor(logo, human[0:row, 0:col])
        # Write the (possibly blended) patch back into the canvas.
        tiger[y:y + row, x:x + col] = logo
# Create a black image, a window and bind the function to window
# assumes these hard-coded Windows paths exist; imread returns None
# otherwise and the callback would crash -- TODO confirm paths.
tiger = cv2.imread('C:\Code_python\Image\Picture\Tiger.jpg')
human = cv2.imread('C:\Code_python\Image\Picture\Human.jpg')
while(1):
    # NOTE(review): the callback is re-bound on every frame, and on the
    # first iteration the 'image' window does not exist yet -- consider
    # cv2.namedWindow('image') plus a single setMouseCallback before the loop.
    cv2.setMouseCallback('image', draw_circle)
    cv2.imshow('image',tiger)
    #cv2.imshow('eiei',img2)
    k = cv2.waitKey(20) & 0xFF
    # ESC quits the display loop.
    if k == 27:
        break
cv2.destroyAllWindows()
|
[
"pection.naphat@gmail.com"
] |
pection.naphat@gmail.com
|
6f194f8fee5688dc5d7ab4e2da990b9aaf9ad2a8
|
ecb286df5937cd30855335f3e9eadd3edbddbd02
|
/CARSELL.py
|
2c5425df7c655dd40a48d79202114e60d1bfbfba
|
[] |
no_license
|
prashant97sikarwar/codechef-april-long-challenge-2020
|
fbbddadb3398a285fe735f3c0049f74371b79100
|
5e459541044b54e64fd63b072ff5bf4870dea126
|
refs/heads/master
| 2022-04-14T21:06:41.237608
| 2020-04-13T19:48:29
| 2020-04-13T19:48:29
| 254,818,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
# Codechef CARSELL: sell cars highest-price first; the i-th car sold
# (0-based) fetches (price - i), and only positive gains are collected.
test_cases = int(input())
for _ in range(test_cases):
    n = int(input())
    prices = sorted(map(int, input().split()), reverse=True)
    revenue = 0
    for already_sold, price in enumerate(prices):
        gain = price - already_sold
        if gain <= 0:
            # Remaining (cheaper) cars can only lose money -- stop selling.
            break
        revenue += gain
    print(revenue % 1000000007)
|
[
"prashant97sikarwar@gmail.com"
] |
prashant97sikarwar@gmail.com
|
3dfbf8c94d2352552fc10b1451ec343edf118d69
|
686d2e525b7cd7a792501309f251dbf6dcea7ef4
|
/剑指offer/14.2剪绳子-贪心法.py
|
cd7abf147180fc0496002f548c7b684b08bfbf0b
|
[] |
no_license
|
freemanwang/Algorithm
|
fa23c9c33c43f942e72d9d1828a95417e7c99575
|
bb691c1afb460a382d7aaaa308e8b4e17f5bf4c5
|
refs/heads/master
| 2020-06-29T19:37:32.584724
| 2020-02-07T06:36:29
| 2020-02-07T06:36:29
| 200,605,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
def maxCuttingSolution(length: int):
    """Greedy rope-cutting: maximise the product of the segment lengths.

    Cut as many length-3 segments as possible; when that would leave a
    remainder of 1, trade one 3-segment for two 2-segments (2*2 > 3*1).
    Ropes shorter than 2 cannot be cut (product 0); lengths 2 and 3 must
    be cut at least once, giving products 1 and 2 respectively.
    """
    if length < 2:
        return 0
    if length == 2:
        return 1
    if length == 3:
        return 2
    # Prefer segments of length 3.
    segments_of_three = length // 3
    # A leftover of 1 is better spent as 2*2 than as 3*1.
    if length - segments_of_three * 3 == 1:
        segments_of_three -= 1
    segments_of_two = (length - segments_of_three * 3) // 2
    print('长为3的段有:', segments_of_three, '段; ', '长为2的段有:', segments_of_two, '段')
    return pow(3, segments_of_three) * pow(2, segments_of_two)


max = maxCuttingSolution(7)
print(max)
|
[
"121689123@qq.com"
] |
121689123@qq.com
|
9db1a5211567b08ec0bd272c70b33e7b4b2417b3
|
fb82fdf706863465b1f357cd1fa0447474cd8a70
|
/ServerComponent/venv/Lib/site-packages/rsrc/contrib/db/sqla/serializer.py
|
86d9f430b9df9c7e03c90d865f5bb3be82ec6e8c
|
[
"MIT"
] |
permissive
|
CDU55/FakeNews
|
d79e2a069b3f1392f779d5b2256cd54c696e789a
|
707bd48dd78851081d98ad21bbdadfc2720bd644
|
refs/heads/main
| 2023-02-20T06:27:18.618837
| 2021-01-17T15:14:27
| 2021-01-17T15:14:27
| 305,167,221
| 0
| 1
|
MIT
| 2020-12-07T19:51:46
| 2020-10-18T18:16:49
|
Python
|
UTF-8
|
Python
| false
| false
| 436
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Module-level singleton: a jsonsir Serializer wired with the encoders this
# service needs — ints, bools, and datetimes rendered with the DATE_FORMAT
# taken from the rsrc settings.
from rsrc import settings
from jsonsir import Serializer
from jsonsir.contrib.intencoder import IntEncoder
from jsonsir.contrib.boolencoder import BoolEncoder
from jsonsir.contrib.datetimeencoder import DateTimeEncoder
# instantiate `Serializer` (bound with specified encoders)
serializer = Serializer([
    IntEncoder(),
    BoolEncoder(),
    DateTimeEncoder(settings.DATE_FORMAT),
])
|
[
"48147775+BiancaChirica@users.noreply.github.com"
] |
48147775+BiancaChirica@users.noreply.github.com
|
7a99160a9b41a3afe3b729d916ebd377593d9fa2
|
bc233c24523f05708dd1e091dca817f9095e6bb5
|
/bitmovin_api_sdk/encoding/manifests/dash/periods/adaptationsets/representations/sprite/sprite_api.py
|
c0609f57f38b0754440948820163b44f8bde915a
|
[
"MIT"
] |
permissive
|
bitmovin/bitmovin-api-sdk-python
|
e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd
|
b0860c0b1be7747cf22ad060985504da625255eb
|
refs/heads/main
| 2023-09-01T15:41:03.628720
| 2023-08-30T10:52:13
| 2023-08-30T10:52:13
| 175,209,828
| 13
| 14
|
MIT
| 2021-04-29T12:30:31
| 2019-03-12T12:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,921
|
py
|
# coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.bitmovin_response import BitmovinResponse
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
from bitmovin_api_sdk.models.sprite_representation import SpriteRepresentation
from bitmovin_api_sdk.encoding.manifests.dash.periods.adaptationsets.representations.sprite.sprite_representation_list_query_params import SpriteRepresentationListQueryParams
class SpriteApi(BaseApi):
    """REST client for DASH Sprite representations.

    Thin wrappers around ``self.api_client`` for the
    ``.../adaptationsets/{id}/representations/sprite`` endpoints:
    create, delete, get, and (paginated) list.
    """
    @poscheck_except(2)
    def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
        # type: (str, str, str, BitmovinApiLoggerBase) -> None
        super(SpriteApi, self).__init__(
            api_key=api_key,
            tenant_org_id=tenant_org_id,
            base_url=base_url,
            logger=logger
        )
    def create(self, manifest_id, period_id, adaptationset_id, sprite_representation, **kwargs):
        # type: (string_types, string_types, string_types, SpriteRepresentation, dict) -> SpriteRepresentation
        """Add Sprite Representation
        :param manifest_id: Id of the manifest
        :type manifest_id: string_types, required
        :param period_id: Id of the period
        :type period_id: string_types, required
        :param adaptationset_id: Id of the image adaptation set
        :type adaptationset_id: string_types, required
        :param sprite_representation: The Sprite representation to be added to the adaptation set. Note that the adaptation set has to be an image adaptation set. Only supported for sprites generated with encoder version `2.76.0` or above.
        :type sprite_representation: SpriteRepresentation, required
        :return: Sprite representation
        :rtype: SpriteRepresentation
        """
        return self.api_client.post(
            '/encoding/manifests/dash/{manifest_id}/periods/{period_id}/adaptationsets/{adaptationset_id}/representations/sprite',
            sprite_representation,
            path_params={'manifest_id': manifest_id, 'period_id': period_id, 'adaptationset_id': adaptationset_id},
            type=SpriteRepresentation,
            **kwargs
        )
    def delete(self, manifest_id, period_id, adaptationset_id, representation_id, **kwargs):
        # type: (string_types, string_types, string_types, string_types, dict) -> BitmovinResponse
        """Delete Sprite Representation
        :param manifest_id: Id of the manifest
        :type manifest_id: string_types, required
        :param period_id: Id of the period
        :type period_id: string_types, required
        :param adaptationset_id: Id of the adaptation set
        :type adaptationset_id: string_types, required
        :param representation_id: Id of the Sprite representation to be deleted
        :type representation_id: string_types, required
        :return: Id of the Sprite Representation
        :rtype: BitmovinResponse
        """
        return self.api_client.delete(
            '/encoding/manifests/dash/{manifest_id}/periods/{period_id}/adaptationsets/{adaptationset_id}/representations/sprite/{representation_id}',
            path_params={'manifest_id': manifest_id, 'period_id': period_id, 'adaptationset_id': adaptationset_id, 'representation_id': representation_id},
            type=BitmovinResponse,
            **kwargs
        )
    def get(self, manifest_id, period_id, adaptationset_id, representation_id, **kwargs):
        # type: (string_types, string_types, string_types, string_types, dict) -> SpriteRepresentation
        """Sprite Representation Details
        :param manifest_id: Id of the manifest
        :type manifest_id: string_types, required
        :param period_id: Id of the period
        :type period_id: string_types, required
        :param adaptationset_id: Id of the adaptation set
        :type adaptationset_id: string_types, required
        :param representation_id: Id of the Sprite representation
        :type representation_id: string_types, required
        :return: Sprite Representation details
        :rtype: SpriteRepresentation
        """
        return self.api_client.get(
            '/encoding/manifests/dash/{manifest_id}/periods/{period_id}/adaptationsets/{adaptationset_id}/representations/sprite/{representation_id}',
            path_params={'manifest_id': manifest_id, 'period_id': period_id, 'adaptationset_id': adaptationset_id, 'representation_id': representation_id},
            type=SpriteRepresentation,
            **kwargs
        )
    def list(self, manifest_id, period_id, adaptationset_id, query_params=None, **kwargs):
        # type: (string_types, string_types, string_types, SpriteRepresentationListQueryParams, dict) -> SpriteRepresentation
        """List all Sprite Representations
        :param manifest_id: Id of the manifest
        :type manifest_id: string_types, required
        :param period_id: Id of the period
        :type period_id: string_types, required
        :param adaptationset_id: Id of the adaptation set
        :type adaptationset_id: string_types, required
        :param query_params: Query parameters
        :type query_params: SpriteRepresentationListQueryParams
        :return: List of Sprite Representations
        :rtype: SpriteRepresentation
        """
        return self.api_client.get(
            '/encoding/manifests/dash/{manifest_id}/periods/{period_id}/adaptationsets/{adaptationset_id}/representations/sprite',
            path_params={'manifest_id': manifest_id, 'period_id': period_id, 'adaptationset_id': adaptationset_id},
            query_params=query_params,
            pagination_response=True,
            type=SpriteRepresentation,
            **kwargs
        )
|
[
"openapi@bitmovin.com"
] |
openapi@bitmovin.com
|
63183dac12e1ac146a8ee95e08c3d767c7460535
|
76e498240a644b7ccf7e6af69f958f72af595a3c
|
/2018/function.py
|
44fe021fe2be5eac9710967e541d465baee557c2
|
[] |
no_license
|
VladyslavHnatchenko/united
|
54a868b9bdb54b510fb33f6b74562f2fb2c23c01
|
64d3319b18fcc8e1dbb96a63f7bef0c2e5766520
|
refs/heads/master
| 2020-04-13T07:24:28.053997
| 2019-08-23T07:22:10
| 2019-08-23T07:22:10
| 163,050,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
def function_a():
    """Bind the module-level name ``a`` to 1 and return a + 2 (always 3)."""
    global a
    a = 1
    local_b = 2
    return local_b + a


def function_b():
    """Read the global ``a`` created by function_a() and return a + 3.

    Raises NameError if called before function_a() has ever run.
    """
    local_c = 3
    return local_c + a


print(function_a())
print(function_b())
# def many(*args, **kwargs):
# print(args)
# print(kwargs)
#
#
# many(1, 2, 3, name="Mike", job="programmer")
# def keyword_function(a=1, b=2):
# return a+b
#
#
# print(keyword_function(b=4, a=5))
# def add(a, b):
# return a + b
#
#
# print(add(a=2, b=3))
# total = add(b=4, a=5)
# print(total)
# print(add(1, 2))
# add(1)
# def empty_function():
# pass
#
#
# def a_function():
# print("You just created a function!")
#
#
# # a_function()
# empty_function()
|
[
"hnatchenko.vladyslav@gmail.com"
] |
hnatchenko.vladyslav@gmail.com
|
6e9202c9029c4103e41f6eb7df2b3592fa136a5c
|
946469c469a07e70260143805c0b395508aad27f
|
/tf01_helloword/tf_01_helloword.py
|
4812457f6c8033ceb7f726ff5dfe5858b9446803
|
[] |
no_license
|
jpegbert/TensorFlow1.x
|
6f4bf2d658ac3cea298b0247c405f48cefa5db7f
|
bded173429581805324fda4bccd8180eafdd3496
|
refs/heads/master
| 2023-03-21T15:19:39.440658
| 2021-03-19T12:57:02
| 2021-03-19T12:57:02
| 348,709,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
# TensorFlow 1.x "hello world": build a constant op in the default graph,
# then evaluate it inside a Session (tf.Session was removed in TF 2.x).
import tensorflow as tf
message = tf.constant('Welcome to the exciting world of Deep Neural Networks!')
with tf.Session() as sess:
    # sess.run returns the tensor value as bytes, hence the decode() to str.
    print(sess.run(message).decode())
|
[
"jiangpeng.jiang@zhaopin.com.cn"
] |
jiangpeng.jiang@zhaopin.com.cn
|
5930cfc112f15a47ff6b5e6f315d023db88b1b72
|
dd80a584130ef1a0333429ba76c1cee0eb40df73
|
/development/samples/SampleSyncAdapter/samplesyncadapter_server/model/datastore.py
|
1f916332184373abece4825b266b23e6cfb2503a
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
karunmatharu/Android-4.4-Pay-by-Data
|
466f4e169ede13c5835424c78e8c30ce58f885c1
|
fcb778e92d4aad525ef7a995660580f948d40bc9
|
refs/heads/master
| 2021-03-24T13:33:01.721868
| 2017-02-18T17:48:49
| 2017-02-18T17:48:49
| 81,847,777
| 0
| 2
|
MIT
| 2020-03-09T00:02:12
| 2017-02-13T16:47:00
| null |
UTF-8
|
Python
| false
| false
| 1,954
|
py
|
#!/usr/bin/python2.5
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Represents user's contact information"""
from google.appengine.ext import db
class Contact(db.Model):
    """Data model class to hold user objects."""
    handle = db.StringProperty(required=True)
    firstname = db.StringProperty()
    lastname = db.StringProperty()
    phone_home = db.PhoneNumberProperty()
    phone_office = db.PhoneNumberProperty()
    phone_mobile = db.PhoneNumberProperty()
    email = db.EmailProperty()
    status = db.TextProperty()
    avatar = db.BlobProperty()
    deleted = db.BooleanProperty()
    updated = db.DateTimeProperty(auto_now_add=True)

    @classmethod
    def _get_by_handle(cls, username):
        """Return the first Contact whose handle equals username, or None.

        Shared lookup used by every accessor below; also returns None for a
        missing or empty username (previously each method duplicated this
        query inline).
        """
        if username in (None, ''):
            return None
        return cls.gql('WHERE handle = :1', username).get()

    @classmethod
    def get_contact_info(cls, username):
        """Return the full Contact entity for username, or None."""
        return cls._get_by_handle(username)

    @classmethod
    def get_contact_last_updated(cls, username):
        """Return the contact's `updated` timestamp, or None.

        Fix: the previous version called query.get().updated directly and
        raised AttributeError whenever no contact matched the handle.
        """
        contact = cls._get_by_handle(username)
        return contact.updated if contact is not None else None

    @classmethod
    def get_contact_id(cls, username):
        """Return the contact's datastore key id, or None (same fix)."""
        contact = cls._get_by_handle(username)
        return contact.key().id() if contact is not None else None

    @classmethod
    def get_contact_status(cls, username):
        """Return the contact's status text, or None (same fix)."""
        contact = cls._get_by_handle(username)
        return contact.status if contact is not None else None
|
[
"karun.matharu@gmail.com"
] |
karun.matharu@gmail.com
|
946474b8afc07b9a6402d4e856c9d8b4e23a8aa7
|
aed0850065dd467c0d0650c41987b61e94cad9c6
|
/day 16/merging.py
|
a62f058715350f049773cbc9a756eca3ed872a3c
|
[] |
no_license
|
parmarjh/100day-coding-challenge
|
96c79cc86a8f1e0b062b72dd5992610597e289e8
|
8b3e1f6654e4a55a08b4f938f13626fcc2aa8468
|
refs/heads/master
| 2023-02-18T19:51:22.200057
| 2020-12-31T12:54:10
| 2020-12-31T12:54:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,055
|
py
|
class Node:
    """One element of a singly linked list."""

    def __init__(self, data):
        # Payload plus a forward pointer that starts out detached.
        self.data = data
        self.next = None
class LinkedList:
    """Minimal singly linked list with tail append and stdout printing."""

    def __init__(self):
        self.head = None

    def printList(self):
        """Print every element separated by a space (no trailing newline)."""
        node = self.head
        while node is not None:
            print(node.data, end=" ")
            node = node.next

    def addToList(self, newData):
        """Append a new Node holding ``newData`` at the tail (O(n) walk)."""
        fresh = Node(newData)
        if self.head is None:
            self.head = fresh
            return
        tail = self.head
        while tail.next is not None:
            tail = tail.next
        tail.next = fresh
def mergeLists(headA, headB):
    """Merge two sorted singly linked lists and return the merged head.

    Classic dummy-head splice: repeatedly attach the smaller front node to
    the result's tail; once either list is exhausted, attach the remainder
    of the other. Ties go to headA (stable). Nodes are relinked, not copied.
    """
    dummy = Node(0)
    tail = dummy
    while headA is not None and headB is not None:
        if headA.data <= headB.data:
            tail.next, headA = headA, headA.next
        else:
            tail.next, headB = headB, headB.next
        tail = tail.next
    # At most one of the two is non-empty here; append it wholesale.
    tail.next = headA if headA is not None else headB
    return dummy.next
# Build two sorted lists and splice them together; after mergeLists the
# nodes of listB are linked into listA's chain, so only listA is printed.
listA = LinkedList()
listB = LinkedList()
listA.addToList(5)
listA.addToList(10)
listA.addToList(15)
listB.addToList(2)
listB.addToList(3)
listB.addToList(20)
listA.head = mergeLists(listA.head, listB.head)
print("Merged Linked List is:")
listA.printList()
|
[
"annamalaipalani11@gmail.com"
] |
annamalaipalani11@gmail.com
|
f295ae8bb445794a84f3e45c99863c3f72ad0726
|
9bbd4f00fd88474b3ab1f007cb6848cf6c2304e8
|
/run.py
|
93c10171ae06cd2b21a361602775df251bd21300
|
[] |
no_license
|
cappuccino213/AutoGTF
|
3128deb15bf6ebd67ed811773b3ef972f51fa9b7
|
e9ee23860c5f59011367fb84c646f942fb5890ef
|
refs/heads/master
| 2020-04-16T17:52:30.603769
| 2019-01-17T03:01:46
| 2019-01-17T03:01:46
| 165,792,698
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,936
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/1/14 14:20
# @Author : Zhangyp
# @File : run.py
# @Software: PyCharm
# @license : Copyright(C), eWord Technology Co., Ltd.
# @Contact : yeahcheung213@163.com
import decimal
import os
import shutil

from mssql import *
# from ReadConf import *
# import os
from shellcopy import *
PARA = conf()# load the configuration once at import time
def get_filepath():
    """Query the database and return (names, relative_paths) for each file.

    NOTE(review): despite the original docstring saying "dict", this returns
    a tuple of two parallel lists — confirm with callers.
    Returns None implicitly if anything inside the loop raises (the error is
    only logged).
    """
    ms = MSSQL(host=PARA['host'], user=PARA['user'], pwd=PARA['password'], db=PARA['dbname'])
    path = ms.ExecQuery(PARA['query_statement'])
    try:
        lp = []
        ln = []
        for i in range(len(path)):
            # Each row's first column is a Windows path; take the last
            # backslash component as the file name ...
            n = path[i][0].split('\\')[-1]
            ln.append(n)
            # ... and everything before it (minus the drive/root segment)
            # as the relative directory path.
            p = path[i][0].split(n)[0]
            p = p.split('\\',1)[1]
            lp.append(p)
        return ln, lp
    except Exception as e:
        logging.error(str(e))
def find_file():
    """List every entry in ./file under the current working directory.

    NOTE(review): os.path.isdir(i) is tested against the bare name, not the
    joined path, so subdirectories of ./file are not actually filtered out —
    confirm whether that matters to callers.
    Returns None implicitly when the directory cannot be read.
    """
    try:
        cp = os.getcwd()
        f = [i for i in os.listdir(os.path.join(cp, 'file')) if not os.path.isdir(i)]
        return f
    except OSError as e:
        logging.error(str(e))
        # return []
def generate_file(src_file, num):
    """Create ``num`` numbered copies of ``src_file`` in its own directory.

    Copies are named ``<stem><i><ext>`` for i in 0..num-1 (e.g. ``a.dcm`` ->
    ``a0.dcm``, ``a1.dcm``, ...). Uses os.path instead of manual '\\' and '.'
    string splitting, which (a) only worked with Windows separators, (b)
    raised IndexError for names without an extension, and (c) truncated
    names containing extra dots.

    :param src_file: path of the file to duplicate
    :param num: number of copies to create
    """
    directory, basename = os.path.split(src_file)
    stem, suffix = os.path.splitext(basename)  # suffix keeps its leading '.'
    for i in range(num):
        dst_file = os.path.join(directory, '%s%d%s' % (stem, i, suffix))
        shutil.copyfile(src_file, dst_file)
def main():
    """Rename the staged files and copy them into the destination folder.

    Steps: fetch (names, relative paths) from the DB, recreate the directory
    tree under ./newfile, rename the files staged in ./file into it, then
    copy the tree to PARA['path'] — via a network share when isshare == '1'.
    All failures are logged rather than raised.
    """
    (filename, paths) = get_filepath() # target file name list and relative paths
    try:
        abspath = [os.getcwd() + '\\newfile' + paths[i] for i in range(len(paths))]# absolute target paths
        try:
            for i in range(len(abspath)):
                os.makedirs(abspath[i])# create the target directory tree
                logging.info('任务%s:文件夹->%s 创建成功'%(str(i), abspath[i]))
        except OSError as e:
            # NOTE(review): the first makedirs failure aborts the whole loop,
            # leaving later directories uncreated.
            logging.warning(str(e))
        srcname = find_file()
        if len(srcname) == len(paths):
            for i in range(len(srcname)):
                # Rename each staged file to its database-provided name.
                '''重命名文件'''
                oldname = os.path.join(os.getcwd(), 'file', srcname[i]) # old name
                newname = os.path.join(abspath[i], filename[i]) # new name
                try:
                    os.rename(oldname, newname)
                    logging.info('任务%s:重命名文件%s' % (str(i), newname))
                except FileExistsError as e:
                    logging.warning('%s【建议】清空newfile目录后重试'%str(e))
            if PARA['isshare'] == '1':
                # Copy via a temporarily opened network share.
                openshare(PARA['path'], PARA['shareuser'], PARA['sharepwd'])
                shellcopy(os.getcwd() + '\\newfile', PARA['path'])
                closeshare()
            elif PARA['isshare'] == '0':
                mkdir(PARA['path'])
                shellcopy(os.getcwd() + '\\newfile', PARA['path'])
            else:
                pass
        else:
            logging.warning('源文件与目的生成文件数量不符')
    except Exception as e:
        logging.info(str(e))
if __name__ == '__main__':
    # NOTE(review): the real entry point main() is disabled; this currently
    # runs a hard-coded local stress test creating 2000 copies instead.
    # main()
    generate_file(r'E:\1\2ewrfewr.dcm', 2000)
|
[
"yeahcheung213@163.com"
] |
yeahcheung213@163.com
|
4fb445678a4cd023a19dc7dd202db200d82ccfba
|
9331f7179c2490f9bc0141ce91ebea704124e168
|
/clr.py
|
3aaea86cb672e27e196ed3354ac23a1e91344fba
|
[] |
no_license
|
peternara/Contrastive-learning-for-image-retrieval-self-training
|
50fbb35ca4f4f8cef6f70e7f037bb65f1f58bc21
|
f04bdd62a1a647207c599570394c93327fb02044
|
refs/heads/main
| 2023-03-24T13:01:19.448203
| 2021-03-19T07:29:32
| 2021-03-19T07:29:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,435
|
py
|
import os
os.environ['CUDA_LAUNCH_BLOCKING']='1'
import torch
from models.BiTmodel import BiTSimCLR
from models.ViTmodel import VisionTransformerSimCLR
from models.Efficientmodel import EfficientCLR
from models.CGDmodel import CGDmodel
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
from loss.supconloss import SupConLoss
from utils.utils import get_device, count_parameters, save_config_file, AverageMeter, set_bn_eval
import pytorch_warmup as warmup
import sys
from tqdm import tqdm
import logging
import numpy as np
# Deterministic seeding and module-level logger setup.
torch.manual_seed(0)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Optional dependency probe: apex enables mixed-precision (fp16) training.
apex_support = False
try:
    sys.path.append('./apex')
    from apex import amp
    apex_support = True
except ImportError:
    # Narrowed from a bare `except:` so real errors (KeyboardInterrupt,
    # SyntaxError in apex itself, ...) are no longer silently swallowed;
    # only a missing apex installation falls back to fp32.
    print("Please install apex for mixed precision training from: https://github.com/NVIDIA/apex")
    apex_support = False
class SimCLR(object):
    """SimCLR / SupCon contrastive-training driver.

    Wires a backbone (ViT / EfficientNet / CGD / BiT, chosen by
    config['model_name']) to a supervised-contrastive loss, then runs a
    train/validate loop with cosine LR decay, linear warmup, optional apex
    fp16, TensorBoard logging, and best-by-validation-loss checkpointing.
    """
    def __init__(self, dataset, config):
        # config carries nested sections: "SimCLR" (training hyperparams)
        # and "subcon-loss" (loss hyperparams).
        self.config = config
        self.device = get_device()
        self.writer = SummaryWriter()
        self.dataset = dataset
        self.train_config = config["SimCLR"]
        self.loss_config = config['subcon-loss']
        self.criterion = SupConLoss(self.loss_config['temperature'],
                                    contrast_mode=self.loss_config['mode'],
                                    base_temperature=self.loss_config['base'],
                                    device=self.device).to(self.device)
        # Backbone selection; BiT is the fallback for unknown model names.
        if(config['model_name'] == 'ViT'):
            model = VisionTransformerSimCLR(config).to(self.device)
        elif(config['model_name'] == 'Eff'):
            model = EfficientCLR(config).to(self.device)
        elif(config['model_name'] == 'CGD'):
            model = CGDmodel(config).to(self.device)
        else:
            model = BiTSimCLR(config).to(self.device)
        self.model = self._load_pre_trained_weights(model)
        num_params = count_parameters(self.model)
        logger.info("Total Parameter: \t%2.1fM" % num_params)
    def _step(self, xi, xj, labels=None):
        """Forward one batch of two augmented views and return the loss.

        xi/xj are the two views of the same images; their features are
        re-paired into shape (batch, 2, dim) as SupConLoss expects.
        NOTE(review): if loss_config["method"] is neither 'SupCon' nor
        'SimCRL' (sic), `loss` is never bound and the final return raises
        UnboundLocalError.
        """
        images = torch.cat([xi, xj], dim=0)
        images = images.to(self.device)
        bsz = self.config['batch_size']
        features, _ = self.model(images)
        f1, f2 = torch.split(features, [bsz, bsz], dim=0)
        features = torch.cat([f1.unsqueeze(1), f2.unsqueeze(1)], dim=1)
        if self.loss_config["method"] == 'SupCon':
            labels = labels.to(self.device)
            loss = self.criterion(features, labels)
        elif self.loss_config["method"] == 'SimCRL':
            loss = self.criterion(features)
        return loss
    def train(self):
        """Run the full training loop; checkpoint the best model by
        validation loss under <tensorboard-run>/checkpoints/model.pth."""
        # load data loaders
        train_loader, valid_loader = self.dataset.get_train_validation_data_loaders()
        # define optimizer
        # NOTE(review): weight_decay comes through eval() on a config string —
        # fine for trusted config files, unsafe for untrusted input.
        optimizer = torch.optim.AdamW(filter(lambda p: p.requires_grad, self.model.parameters()), self.train_config['lr'], weight_decay=eval(self.train_config['weight_decay']))
        n_steps = self.train_config["epochs"] * len(train_loader)
        # learning rate scheduler
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=n_steps)
        warmup_scheduler = warmup.UntunedLinearWarmup(optimizer)
        # NOTE(review): this reads self.config['fp16_precision'] while the
        # loop below reads self.train_config['fp16_precision'] — confirm
        # which config section the flag actually lives in.
        if apex_support and self.config['fp16_precision']:
            self.model, optimizer = amp.initialize(self.model, optimizer,
                                                   opt_level='O2',
                                                   keep_batchnorm_fp32=True)
        model_checkpoints_folder = os.path.join(self.writer.log_dir, 'checkpoints')
        # save config file
        save_config_file(model_checkpoints_folder)
        logger.info("***** Running training *****")
        logger.info(" Total optimization steps = %d", n_steps)
        n_iter = 0
        valid_n_iter = 0
        best_valid_loss = np.inf
        losses = AverageMeter()
        for epoch_counter in range(self.train_config['epochs']):
            self.model.train()
            # self.model.apply(set_bn_eval)
            epoch_iterator = tqdm(train_loader,
                                  desc="Training (X / X Steps) (loss=X.X)",
                                  bar_format="{l_bar}{r_bar}",
                                  dynamic_ncols=True)
            for [xis, xjs], labels in epoch_iterator:
                optimizer.zero_grad()
                loss = self._step(xis, xjs, labels)
                losses.update(loss.item(), self.config["batch_size"])
                if n_iter % self.train_config['log_every_n_steps'] == 0:
                    self.writer.add_scalar('train_loss', loss, global_step=n_iter)
                if apex_support and self.train_config['fp16_precision']:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                optimizer.step()
                n_iter += 1
                epoch_iterator.set_description(
                    "Training (%d / %d Epochs) (loss=%2.5f)" % (epoch_counter, self.train_config['epochs'], losses.val)
                )
            # step the cosine schedule once per epoch, then apply warmup damping
            scheduler.step(scheduler.last_epoch+1)
            warmup_scheduler.dampen()
            # validate the model if requested
            if epoch_counter % self.train_config['eval_every_n_epochs'] == 0:
                valid_loss = self._validate(valid_loader)
                if valid_loss < best_valid_loss:
                    # save the model weights
                    best_valid_loss = valid_loss
                    torch.save(self.model.state_dict(), os.path.join(model_checkpoints_folder, 'model.pth'))
                self.writer.add_scalar('validation_loss', valid_loss, global_step=valid_n_iter)
                valid_n_iter += 1
            self.writer.add_scalar('cosine_lr_decay', scheduler.get_lr()[0], global_step=n_iter)
    def _load_pre_trained_weights(self, model):
        """Load runs/<fine_tune_from>/checkpoints/model.pth into model if it
        exists; otherwise leave the model untouched (train from scratch)."""
        try:
            checkpoints_folder = os.path.join('./runs', self.train_config['fine_tune_from'], 'checkpoints')
            state_dict = torch.load(os.path.join(checkpoints_folder, 'model.pth'))
            model.load_state_dict(state_dict)
            logger.info("Loaded pre-trained model with success.")
        except FileNotFoundError:
            logger.info("Pre-trained weights not found. Training from scratch.")
        return model
    def _validate(self, valid_loader):
        """Return the average contrastive loss over valid_loader (no grads)."""
        eval_losses = AverageMeter()
        logger.info("***** Running Validation *****")
        # validation steps
        with torch.no_grad():
            self.model.eval()
            epoch_iterator = tqdm(valid_loader,
                                  desc="Validating... (loss=X.X)",
                                  bar_format="{l_bar}{r_bar}",
                                  dynamic_ncols=True)
            for [xis, xjs], labels in epoch_iterator:
                loss = self._step(xis, xjs, labels)
                eval_losses.update(loss.item(), self.config["batch_size"])
                epoch_iterator.set_description("Validating... (loss=%2.5f)" % eval_losses.val)
        logger.info("\n")
        logger.info("Validation Results")
        logger.info("Valid Loss: %2.5f" % eval_losses.avg)
        return eval_losses.avg
|
[
"noreply@github.com"
] |
peternara.noreply@github.com
|
c3f066c947f48b2d598a1bc6be518303b1f2221e
|
72ede563023f78da0d23f36df0106aa4cd386600
|
/src/mailme/utils/text.py
|
0e2e912bd533cc7e3669e3cef8cd63fa16722f59
|
[
"BSD-3-Clause"
] |
permissive
|
mailme/mailme.io
|
773e8266e6ec307762d220c0a6381170f6905de3
|
3b9cc8009226bf349e96504328146c61d8afcb02
|
refs/heads/master
| 2020-05-31T18:59:01.947128
| 2014-07-04T14:48:17
| 2014-07-04T14:48:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
# -*- coding: utf-8 -*-
"""
mailme.utils.text
~~~~~~~~~~~~~~~~~
Various text realated tools.
"""
import re
# Captures the last run of digits in a string (group 1).
_str_num_re = re.compile(r'(?:[^\d]*(\d+)[^\d]*)+')


def increment_string(s):
    """Increment the number embedded in *s*, or append '-2' if there is none.

    NOTE(review): a '-' separator is always inserted before the incremented
    number, so 'foo-1' becomes 'foo--2' — confirm the double dash is intended.
    """
    match = _str_num_re.search(s)
    if match is None:
        return s + '-2'
    bumped = str(int(match.group(1)) + 1)
    start, end = match.span(1)
    if not (start or end):
        return s + '-2'
    # Trim the prefix so a grown number (e.g. 9 -> 10) does not shift the tail.
    prefix = s[:max(end - len(bumped), start)]
    return '{0}-{1}{2}'.format(prefix, bumped, s[end:])
|
[
"cg@webshox.org"
] |
cg@webshox.org
|
15a028c91f53835f4b343ea7425d4e20c639cb9d
|
077c91b9d5cb1a6a724da47067483c622ce64be6
|
/fuzz_pyretic_mesh_proactive_firewall_no_close_check_loop_mcs_with_max_replays_5/interreplay_106_l_4/interactive_replay_config.py
|
6f2cfeb97d82fa75d61d3f7f457edfa22f7668a6
|
[] |
no_license
|
Spencerx/experiments
|
0edd16398725f6fd9365ddbb1b773942e4878369
|
aaa98b0f67b0d0c0c826b8a1565916bf97ae3179
|
refs/heads/master
| 2020-04-03T10:11:40.671606
| 2014-06-11T23:55:11
| 2014-06-11T23:55:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,029
|
py
|
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import InteractiveReplayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
# STS replay configuration: a single Pyretic firewall controller driving a
# 3-switch mesh; the InteractiveReplayer replays a previously recorded
# minimal-causal-sequence event trace against it.
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pyretic.py -m p0 pyretic.examples.firewall_for_sts_no_close', label='c1', address='127.0.0.1', cwd='../pyretic', kill_cmd='ps aux | grep -e pox -e pyretic | grep -v simulator | cut -c 9-15 | xargs kill -9')],
                 topology_class=MeshTopology,
                 topology_params="num_switches=3",
                 patch_panel_class=BufferedPatchPanel,
                 multiplex_sockets=False,
                 kill_controllers_on_exit=True)
control_flow = InteractiveReplayer(simulation_config, "experiments/fuzz_pyretic_mesh_proactive_firewall_no_close_check_loop_mcs/interreplay_106_l_4/events.trace")
# wait_on_deterministic_values=False
# delay_flow_mods=False
# Invariant check: 'None'
|
[
"cs@cs.berkeley.edu"
] |
cs@cs.berkeley.edu
|
970f5a23bdfc872d4583272bcf1f1cde513713fe
|
a8062308fb3bf6c8952257504a50c3e97d801294
|
/test/test_1567_maximum_length_of_subarray_with_positive_product.py
|
be383471a80f55305d5a81b2bb7181ca2e4f326f
|
[] |
no_license
|
wan-catherine/Leetcode
|
650d697a873ad23c0b64d08ad525bf9fcdb62b1b
|
238995bd23c8a6c40c6035890e94baa2473d4bbc
|
refs/heads/master
| 2023-09-01T00:56:27.677230
| 2023-08-31T00:49:31
| 2023-08-31T00:49:31
| 143,770,000
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 881
|
py
|
from unittest import TestCase
from problems.N1567_Maximum_Length_Of_Subarray_With_Positive_Product import Solution
class TestSolution(TestCase):
    """LeetCode 1567: maximum length of subarray with positive product."""
    def test_getMaxLen(self):
        self.assertEqual(4, Solution().getMaxLen([1, -2, -3, 4]))
    def test_getMaxLen_1(self):
        # Zero splits the array; the answer comes from the segment after it.
        nums = [0,1,-2,-3,-4]
        self.assertEqual(3, Solution().getMaxLen(nums))
    def test_getMaxLen_2(self):
        nums = [-1,-2,-3,0,1]
        self.assertEqual(2, Solution().getMaxLen(nums))
    def test_getMaxLen_3(self):
        # A single negative must be dropped from one end.
        nums = [-1,2]
        self.assertEqual(1, Solution().getMaxLen(nums))
    def test_getMaxLen_4(self):
        nums = [1,2,3,5,-6,4,0,10]
        self.assertEqual(4, Solution().getMaxLen(nums))
    def test_getMaxLen_5(self):
        # Mixed zeros and negatives across several segments.
        nums = [5,-20,-20,-39,-5,0,0,0,36,-32,0,-7,-10,-7,21,20,-12,-34,26,2]
        self.assertEqual(8, Solution().getMaxLen(nums))
|
[
"rarry2012@gmail.com"
] |
rarry2012@gmail.com
|
3b3ffedb0a26a37b64ae6911117d444709c961dd
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_116/2742.py
|
11f9b2447b2a937fc8b101e3bde6e62529371e24
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,461
|
py
|
def main(x):
    """Judge a 4x4 tic-tac-toe board with a wild 'T' cell.

    :param x: list of four 4-character strings over {'X', 'O', 'T', '.'};
              'T' counts for either player.
    :return: 'X won', 'O won', 'Draw', or 'Game has not completed'.

    Rewritten to enumerate all 10 winnable lines uniformly. The previous
    version missed anti-diagonal wins whose corner cell x[0][3] was 'T'
    (it only looked for a player mark there before checking the rest).
    """
    # Collect every winnable line: 4 rows, 4 columns, and both diagonals.
    lines = [[x[r][c] for c in range(4)] for r in range(4)]
    lines += [[x[r][c] for r in range(4)] for c in range(4)]
    lines.append([x[i][i] for i in range(4)])
    lines.append([x[i][3 - i] for i in range(4)])
    # A player wins a line when every cell is his own mark or the wild 'T'.
    for player in ('X', 'O'):
        for line in lines:
            if all(cell in (player, 'T') for cell in line):
                return player + ' won'
    # No winner: the game is unfinished if any empty cell remains.
    if any('.' in row for row in x):
        return 'Game has not completed'
    return 'Draw'
if __name__ == '__main__':
    import sys
    # NOTE(review): Python 2 only (uses xrange); the input file name '1.txt'
    # is hard-coded, `sys` is imported but unused, and inpf is never closed.
    inp=[[''],[''],[''],['']]
    inpf=open('1.txt')
    outp=open('output.txt','w')
    N = int(inpf.readline())
    for i in xrange(N):
        # Read the four board rows for this test case.
        for j in xrange(4):
            inp[j]=inpf.readline().strip()
        res = main(inp)
        # Consume the blank separator line between cases; value unused.
        K=inpf.readline().strip()
        outp.write("Case #%d: %s\n" % (i + 1, res))
    outp.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
059406c3a6505c171156ef98fa09d4509e2bfe85
|
66738cf02020d979410bf65be524ed9bb622e7c5
|
/homework/day0214/homework02.py
|
1fab74d3a284ddda1f80f9403c3284315061adc7
|
[] |
no_license
|
Liu-Zhijuan-0313/pythonAdvance
|
882a2c19cf611f068e4549a8b06bdfd0036d3624
|
e5b794f8d0fa0da5465fe123ac179ac09d62cf24
|
refs/heads/master
| 2020-04-22T09:15:57.963165
| 2019-02-23T08:02:49
| 2019-02-23T08:02:49
| 170,261,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,326
|
py
|
# 2.实现需求:给订单列表,用户余额两个函数添加装饰函数,能够完成权限验证功能
# 1.原始
# def checkuser():
# username = input("请输入用户名:")
# if username == "lzj":
# print("登录成功")
# showlist()
# showmoney()
# else:
# print("未授权,登录失败")
#
#
# def showlist():
# print("订单列表")
# def showmoney():
# print("用户余额")
# checkuser()
# 2.带闭包
# def checkuser(fun):
# def check():
# username = input("请输入用户名:")
# if username == "lzj":
# print("登录成功")
# fun()
# else:
# print("未授权,登录失败")
# return check
#
# def showlist():
# print("订单列表")
# showlist = checkuser(showlist)
# showlist()
# def showmoney():
# print("用户余额")
# showmoney = checkuser(showmoney)
# showmoney()
# 3. Decorator version: checkuser gates a view function behind a username
# check. (Runtime prompt/message strings are intentionally left in Chinese.)
def checkuser(fun):
    """Decorator: run ``fun`` only after a successful username prompt."""
    def check():
        username = input("请输入用户名:")
        if username == "lzj":
            print("登录成功")
            fun()
        else:
            print("未授权,登录失败")
    return check
@checkuser
def showlist():
    print("订单列表")
showlist()
@checkuser
def showmoney():
    print("用户余额")
showmoney()
|
[
"1602176692@qq.com"
] |
1602176692@qq.com
|
053462ec4a7e180cc789d1c4e57d7317a937c305
|
de1d7a3d8f29f88cc81163daf13e689b6a40f059
|
/email_messages/forms.py
|
405125dc3385913d028d2a32af5fedf65de7a455
|
[] |
no_license
|
kelechi2020/golivecomptask
|
764c8be7cdb760b492bbd203cb1831fd47ba9e0c
|
76c2d3c1ad4830399be0bf41bb63731bc50fe5e6
|
refs/heads/master
| 2022-12-10T14:34:28.809502
| 2017-08-06T12:36:45
| 2017-08-06T12:36:45
| 98,848,170
| 0
| 0
| null | 2022-12-08T00:42:51
| 2017-07-31T04:30:17
|
CSS
|
UTF-8
|
Python
| false
| false
| 985
|
py
|
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext, ugettext_lazy as _
from django.core.mail import send_mail
from django.contrib.auth.models import User
class MessageForm(forms.Form):
    """Compose form: pick another user as recipient and e-mail them a message."""
    recipient = forms.ModelChoiceField(label=_("Recipient"), queryset=User.objects.all(), required=True,)
    message = forms.CharField(label=_("Message"), widget=forms.Textarea, required=True,)
    def __init__(self, request, *args, **kwargs):
        super(MessageForm, self).__init__(*args, **kwargs)
        self.request = request
        # Exclude the sender so users cannot message themselves.
        self.fields["recipient"].queryset = self.fields["recipient"].queryset.exclude(pk=request.user.pk)
    def save(self):
        """Send the message; fail_silently=True swallows SMTP errors by design.

        Fix: ModelChoiceField cleans to a User instance, but send_mail()
        expects e-mail address strings in recipient_list — pass .email.
        """
        cleaned_data = self.cleaned_data
        send_mail(subject=ugettext("A message from %s") % self.request.user, message=cleaned_data["message"], from_email=self.request.user.email, recipient_list=[cleaned_data["recipient"].email], fail_silently=True)
|
[
"egbosikelechi@gmail.com"
] |
egbosikelechi@gmail.com
|
0bbb06dc8d7cbf276b4acee582f650109dd8b1fa
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/celery/2015/12/test_builtins.py
|
73601734b9176fe303b39b5ab5d9b913e6298584
|
[] |
no_license
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005
| 2021-01-28T19:40:51
| 2021-01-28T19:40:51
| 306,497,459
| 1
| 1
| null | 2020-11-24T20:56:18
| 2020-10-23T01:18:07
| null |
UTF-8
|
Python
| false
| false
| 5,411
|
py
|
from __future__ import absolute_import
from celery import group, chord
from celery.app import builtins
from celery.five import range
from celery.utils.functional import pass1
from celery.tests.case import AppCase, ContextMock, Mock, patch
class BuiltinsCase(AppCase):
    """Shared fixture: registers `xsum` and `add` tasks on the test app."""
    def setup(self):
        @self.app.task(shared=False)
        def xsum(x):
            return sum(x)
        self.xsum = xsum
        @self.app.task(shared=False)
        def add(x, y):
            return x + y
        self.add = add
class test_backend_cleanup(BuiltinsCase):
    """The builtin cleanup task must invoke backend.cleanup()."""
    def test_run(self):
        self.app.backend.cleanup = Mock()
        # Mocks lack __name__, which the task decorator requires.
        self.app.backend.cleanup.__name__ = 'cleanup'
        cleanup_task = builtins.add_backend_cleanup_task(self.app)
        cleanup_task()
        self.assertTrue(self.app.backend.cleanup.called)
class test_accumulate(BuiltinsCase):
    """celery.accumulate returns all args, or a single one when index is set."""
    def setup(self):
        self.accumulate = self.app.tasks['celery.accumulate']
    def test_with_index(self):
        self.assertEqual(self.accumulate(1, 2, 3, 4, index=0), 1)
    def test_no_index(self):
        self.assertEqual(self.accumulate(1, 2, 3, 4), (1, 2, 3, 4))
class test_map(BuiltinsCase):
    def test_run(self):
        """celery.map applies the task to each item (items passed whole)."""
        @self.app.task(shared=False)
        def map_mul(x):
            return x[0] * x[1]

        res = self.app.tasks['celery.map'](
            map_mul, [(2, 2), (4, 4), (8, 8)],
        )
        self.assertEqual(res, [4, 16, 64])
class test_starmap(BuiltinsCase):
    def test_run(self):
        """celery.starmap unpacks each item into the task's arguments."""
        @self.app.task(shared=False)
        def smap_mul(x, y):
            return x * y

        res = self.app.tasks['celery.starmap'](
            smap_mul, [(2, 2), (4, 4), (8, 8)],
        )
        self.assertEqual(res, [4, 16, 64])
class test_chunks(BuiltinsCase):
    @patch('celery.canvas.chunks.apply_chunks')
    def test_run(self, apply_chunks):
        """celery.chunks must hand off to canvas.chunks.apply_chunks."""
        @self.app.task(shared=False)
        def chunks_mul(l):
            return l

        self.app.tasks['celery.chunks'](
            chunks_mul, [(2, 2), (4, 4), (8, 8)], 1,
        )
        self.assertTrue(apply_chunks.called)
class test_group(BuiltinsCase):
    def setup(self):
        # Pass signatures through unchanged so Mocks survive canvas handling.
        self.maybe_signature = self.patch('celery.canvas.maybe_signature')
        self.maybe_signature.side_effect = pass1
        self.app.producer_or_acquire = Mock()
        self.app.producer_or_acquire.attach_mock(ContextMock(), 'return_value')
        self.app.conf.task_always_eager = True
        self.task = builtins.add_group_task(self.app)
        super(test_group, self).setup()

    def test_apply_async_eager(self):
        """With task_always_eager, apply_async must fall back to apply()."""
        self.task.apply = Mock(name='apply')
        self.task.apply_async((1, 2, 3, 4, 5))
        self.assertTrue(self.task.apply.called)

    def mock_group(self, *tasks):
        """Build a frozen group whose subtasks have mocked clone/apply_async."""
        g = group(*tasks, app=self.app)
        result = g.freeze()
        for task in g.tasks:
            task.clone = Mock(name='clone')
            task.clone.attach_mock(Mock(), 'apply_async')
        return g, result

    @patch('celery.app.base.Celery.current_worker_task')
    def test_task(self, current_worker_task):
        g, result = self.mock_group(self.add.s(2), self.add.s(4))
        self.task(g.tasks, result, result.id, (2,)).results
        # Each subtask must be dispatched under the group's id without
        # being attached to the parent (the group itself tracks the trail).
        g.tasks[0].clone().apply_async.assert_called_with(
            group_id=result.id, producer=self.app.producer_or_acquire(),
            add_to_parent=False,
        )
        current_worker_task.add_trail.assert_called_with(result)

    @patch('celery.app.base.Celery.current_worker_task')
    def test_task__disable_add_to_parent(self, current_worker_task):
        g, result = self.mock_group(self.add.s(2, 2), self.add.s(4, 4))
        self.task(g.tasks, result, result.id, None, add_to_parent=False)
        # add_to_parent=False must suppress trail registration entirely.
        self.assertFalse(current_worker_task.add_trail.called)
class test_chain(BuiltinsCase):
    def setup(self):
        BuiltinsCase.setup(self)
        self.task = builtins.add_chain_task(self.app)

    def test_not_implemented(self):
        # The chain task is only a placeholder; invoking it must raise.
        with self.assertRaises(NotImplementedError):
            self.task()
class test_chord(BuiltinsCase):
    def setup(self):
        self.task = builtins.add_chord_task(self.app)
        super(test_chord, self).setup()

    def test_apply_async(self):
        """A chord dispatches and exposes the header group as its parent."""
        x = chord([self.add.s(i, i) for i in range(10)], body=self.xsum.s())
        r = x.apply_async()
        self.assertTrue(r)
        self.assertTrue(r.parent)

    def test_run_header_not_group(self):
        # A plain list header (not a group) must be accepted as well.
        self.task([self.add.s(i, i) for i in range(10)], self.xsum.s())

    def test_forward_options(self):
        """group_id / chord options must be forwarded onto the body signature."""
        body = self.xsum.s()
        x = chord([self.add.s(i, i) for i in range(10)], body=body)
        x.run = Mock(name='chord.run(x)')
        x.apply_async(group_id='some_group_id')
        self.assertTrue(x.run.called)
        resbody = x.run.call_args[0][1]
        self.assertEqual(resbody.options['group_id'], 'some_group_id')
        x2 = chord([self.add.s(i, i) for i in range(10)], body=body)
        x2.run = Mock(name='chord.run(x2)')
        x2.apply_async(chord='some_chord_id')
        self.assertTrue(x2.run.called)
        resbody = x2.run.call_args[0][1]
        self.assertEqual(resbody.options['chord'], 'some_chord_id')

    def test_apply_eager(self):
        # Eager mode computes the whole chord synchronously: sum(2*i for i in 0..9) == 90.
        self.app.conf.task_always_eager = True
        x = chord([self.add.s(i, i) for i in range(10)], body=self.xsum.s())
        r = x.apply_async()
        self.assertEqual(r.get(), 90)
|
[
"rodrigosoaresilva@gmail.com"
] |
rodrigosoaresilva@gmail.com
|
d0c3bd3d778e8c722d0034f32411f2807179fe54
|
aec28a032dd5788d9201d6325f2efa285116696e
|
/snake_iterator/t2d.py
|
cd291e17c7b793521c037741410e3ab47419c48d
|
[] |
no_license
|
pletzer/pyterp_tests
|
346addfe89ff14613e986ca2b9a14206f9b41d45
|
56be0634d8f7402ce5322a6a67c1843a593d31de
|
refs/heads/master
| 2020-05-29T08:50:40.072549
| 2017-07-20T03:23:34
| 2017-07-20T03:23:34
| 69,289,048
| 1
| 3
| null | 2017-03-22T19:15:02
| 2016-09-26T20:15:44
|
Python
|
UTF-8
|
Python
| false
| false
| 268
|
py
|
# Demonstrate boustrophedon ("snake") traversal of an n0 x n1 grid:
# even rows run left-to-right, odd rows right-to-left.  Sorting the
# snake indices must recover the full flat index range.
n0, n1 = 3, 4
inds = []
for j in range(n0):
    row_start = n1 * j
    reverse_row = j % 2  # odd rows are traversed right-to-left
    for i in range(n1):
        indexFlat = row_start + i
        if reverse_row:
            indexSnake = row_start + (n1 - 1 - i)
        else:
            indexSnake = row_start + i
        inds.append(indexSnake)
        print('indexFlat = {} indexSnake = {}'.format(indexFlat, indexSnake))
inds.sort()
print(inds)
|
[
"alexander@gokliya.net"
] |
alexander@gokliya.net
|
826e0cec2f2c532e1a1a11b0b59549a07ebdb131
|
924814aef07d17e10461ed2da54e935ea40c0456
|
/links2markdown/links2markdown.py
|
67b641d2f1aa7c7e6e21061f024b545880499be3
|
[
"WTFPL",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
hydrargyrum/attic
|
edd7f302e273cd7e762c8bd8efd365ac4cd24aa1
|
bf90a01ddaeb505b783ec3853c46aaaa0aa51304
|
refs/heads/master
| 2023-09-01T08:11:13.126290
| 2023-08-27T13:06:55
| 2023-08-27T13:08:09
| 13,541,344
| 18
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,208
|
py
|
#!/usr/bin/env python3
# SPDX-License-Identifier: WTFPL
import argparse
import re
import signal
import sys
from html.parser import HTMLParser
import requests
# Matches http(s) URLs, stopping at whitespace and common closing delimiters.
LINK_RE = re.compile(r"""https?://[^])'">\s]+""")
class TitleFetchParser(HTMLParser):
    """Minimal HTML parser that records the first <title> text inside <head>.

    ``self.path`` is a stack of currently open tags, innermost first;
    ``self.title`` holds the title text once seen (None until then).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.path = []
        self.title = None

    def handle_starttag(self, tag, attrs):
        # Push the newly opened tag onto the front of the stack.
        self.path.insert(0, tag)

    def handle_endtag(self, tag):
        try:
            idx = self.path.index(tag)
        except ValueError:
            # Closing tag that was never opened: ignore (common in real HTML).
            return
        # BUG FIX: the AssertionError used to be raised unconditionally after
        # the except block, so every matched closing tag blew up and the
        # stack pop below was unreachable.  Only complain when the closed tag
        # is not the innermost open one (mismatched nesting); fetch_title
        # catches this AssertionError and reports the failing URL.
        if self.path[0] != tag:
            raise AssertionError(f"{self.path[0]!r} != {tag!r}")
        # Pop the closed tag off the stack.
        del self.path[:idx + 1]

    def handle_data(self, data):
        if self.title:
            return
        # Only capture text while the innermost open tag is <title> in <head>.
        if self.path and self.path[0] == "title" and "head" in self.path:
            self.title = data
def fetch_title(url):
    """Fetch *url* and return its HTML <title> text, or None on any failure."""
    try:
        response = requests.get(
            url,
            headers={
                # Impersonate a desktop browser; some sites refuse default UAs.
                "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/113.0",
            },
        )
    except requests.exceptions.RequestException:
        return None

    parser = TitleFetchParser(convert_charrefs=True)
    try:
        parser.feed(response.text)
        parser.close()
    except AssertionError as exc:
        # Raised by the parser on mismatched tag nesting; give up on the page.
        print(f"failed on {url}: {exc}", file=sys.stderr)
        return None
    else:
        return parser.title
def link_to_markdown(m):
    """re.sub callback: wrap a bare URL match as a markdown [title](url) link.

    URLs that are already the target of a markdown link (immediately
    preceded by "](") are returned unchanged.
    """
    url = m[0]
    already_linked = m.start() > 2 and m.string[m.start() - 2:m.start()] == "]("
    if already_linked:
        return url
    # Fall back to the URL itself when the page title cannot be fetched.
    title = fetch_title(url) or url
    title = re.sub(r"\s+", " ", title.strip())
    return f"[{title}]({url})"
def main():
    """Read text from a file (or stdin with '-') and echo it with bare URLs
    converted to markdown links."""
    # Die quietly on Ctrl-C / broken pipe instead of printing a traceback.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)

    parser = argparse.ArgumentParser()
    parser.add_argument("file", default="-", nargs="?")
    args = parser.parse_args()

    if args.file == "-":
        fp = sys.stdin
    else:
        fp = open(args.file)
    with fp:
        for line in fp:
            # Lines keep their own newline; print with end="" to avoid doubling.
            line = LINK_RE.sub(link_to_markdown, line)
            print(line, end="")


if __name__ == "__main__":
    main()
|
[
"dev@indigo.re"
] |
dev@indigo.re
|
b2e6507f7cbc0024a8361d9508c6c4dc5de947ec
|
3a3533b16b54d42d6889c490224345ca985bef74
|
/account_loewie/stock_loewie.py
|
30917e56bf0b12bb40ff4b547bbe9fbb910738b7
|
[
"Apache-2.0"
] |
permissive
|
lester-lees/extra_addons_hk
|
52916ac6858d4b4484bd137b55268c7d5de177d0
|
edd2c2595146bc9c99b75a2d0831a93f940fa55c
|
refs/heads/master
| 2021-01-06T20:43:33.448307
| 2019-03-28T06:46:17
| 2019-03-28T06:46:17
| 99,546,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,228
|
py
|
# -*- coding: utf-8 -*-
from openerp.osv import fields, osv
import logging
_logger = logging.getLogger(__name__)
class stock_move(osv.osv):
    """Stock move extended with a functional link back to its sale order line."""
    _inherit = "stock.move"
    _order = 'id , date_expected desc'

    def _get_sale_order_line(self, cr, uid, ids, field_name, arg, context=None):
        # Resolve each move's sale order line through its procurement record.
        result = {}
        for move in self.pool.get('stock.move').browse(cr, uid, ids, context=context):
            result[move.id] = move.procurement_id.sale_line_id.id
        return result

    _columns = {
        'sale_order_line': fields.function(_get_sale_order_line, type='many2one', relation='sale.order.line',string='Sales Line'),
    }
class stock_picking(osv.osv):
    """Stock picking extended with a link to the invoice created from it."""
    _inherit = 'stock.picking'
    _order = "id desc, priority desc, date asc"
    _columns = {
        # Invoice generated from this picking (set by _invoice_create_line).
        'ref_invoice':fields.many2one('account.invoice',string=u'关联发票'),
    }

    def show_account_delivery(self, cr, uid, ids, context=None):
        """Open the delivery-orders window action (hard-coded id 483) filtered
        on invoiced state.

        ``ids`` is (ab)used as a mode flag here: 0 -> done pickings without a
        linked invoice, 1 -> done pickings with a linked invoice.
        """
        act_obj = self.pool.get('ir.actions.act_window')
        result = act_obj.read(cr, uid, [483], context=context)[0]
        if ids == 0:
            result['domain'] = "[('state','=','done'), ('ref_invoice','=',False),('picking_type_id','in',[2])]"
        elif ids == 1:
            result['domain'] = "[('state','=','done'), ('ref_invoice','!=',False),('picking_type_id','in',[2])]"
        return result

    def _invoice_create_line(self, cr, uid, moves, journal_id, inv_type='out_invoice', context=None):
        """Create invoice lines for *moves*, grouping them into one invoice per
        (partner, currency, company, user) key.  Returns the invoice ids.
        """
        invoice_obj = self.pool.get('account.invoice')
        move_obj = self.pool.get('stock.move')
        invoices = {}
        _logger.info("Jimmy --- _invoice_create_line in sotck_loewie")
        for move in moves:
            company = move.company_id
            origin = move.picking_id.name
            partner, user_id, currency_id = move_obj._get_master_data(cr, uid, move, company, context=context)
            key = (partner, currency_id, company.id, user_id)
            invoice_vals = self._get_invoice_vals(cr, uid, key, inv_type, journal_id, move, context=context)
            if key not in invoices:
                # Get account and payment terms
                invoice_id = self._create_invoice_from_picking(cr, uid, move.picking_id, invoice_vals, context=context)
                invoices[key] = invoice_id
                invoice = invoice_obj.browse(cr, uid, [invoice_id], context=context)[0]
                invoice.write({'picking_id': move.picking_id.id})
                move.picking_id.ref_invoice = invoice_id
                _logger.info("Jimmy picking_id:%d" % move.picking_id.id)
                if move.picking_id.sale_id :
                    invoice.write({'sale_id': move.picking_id.sale_id.id})
                    _logger.info("Jimmy sale_id:%d" % move.picking_id.sale_id.id)
            else:
                invoice = invoice_obj.browse(cr, uid, invoices[key], context=context)
                if not invoice.origin or invoice_vals['origin'] not in invoice.origin.split(', '):
                    # Append this picking's origin to the invoice reference list.
                    invoice_origin = filter(None, [invoice.origin, invoice_vals['origin']])
                    invoice.write({'origin': ', '.join(invoice_origin)})
                invoice.write({'picking_id': move.picking_id.id})
                _logger.info("Jimmy nokey picking_id:%d" % move.picking_id.id)
                # BUG FIX: previously assigned ``invoice_id`` here, which holds
                # the id of the *last created* invoice — possibly belonging to a
                # different grouping key (or unbound if no invoice was created
                # yet in this call).  Use this key's own invoice id instead.
                move.picking_id.ref_invoice = invoices[key]
                if move.picking_id.sale_id :
                    _logger.info("Jimmy nokey sale_id:%d" % move.picking_id.sale_id.id)
                    invoice.write({'sale_id': move.picking_id.sale_id.id})
            invoice_line_vals = move_obj._get_invoice_line_vals(cr, uid, move, partner, inv_type, context=context)
            invoice_line_vals['invoice_id'] = invoices[key]
            invoice_line_vals['origin'] = origin
            move_obj._create_invoice_line_from_vals(cr, uid, move, invoice_line_vals, context=context)
            move_obj.write(cr, uid, move.id, {'invoice_state': 'invoiced'}, context=context)
        invoice_obj.button_compute(cr, uid, invoices.values(), context=context, set_total=(inv_type in ('in_invoice', 'in_refund')))
        return invoices.values()
|
[
"346994202@qq.com"
] |
346994202@qq.com
|
b1947d513d3280e30c2ad6204ed27e488a5c4920
|
c09817490b36beaea98abc8c955904528c5cd4fd
|
/tests/test_0013-rntuple-anchor.py
|
1c3cb5a8ffd9fbacef60a0be301fe4f5ae217ce2
|
[
"BSD-3-Clause"
] |
permissive
|
oshadura/uproot4
|
245b7e14a3341d87a9e655792c6ee912ad443586
|
ee535f6632d371d82b5173a43d6445c854968315
|
refs/heads/master
| 2023-08-19T13:48:23.541016
| 2021-09-22T23:51:52
| 2021-09-22T23:51:52
| 287,539,468
| 0
| 0
|
BSD-3-Clause
| 2020-08-14T13:29:03
| 2020-08-14T13:29:02
| null |
UTF-8
|
Python
| false
| false
| 3,610
|
py
|
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
from __future__ import absolute_import
import json
import sys
try:
import queue
except ImportError:
import Queue as queue
import numpy
import pytest
import skhep_testdata
import uproot
def test():
    """Verify the RNTuple anchor members of ntpl001_staff.root and hex-dump
    the raw header/footer byte ranges for inspection."""
    filename = skhep_testdata.data_path("uproot-ntpl001_staff.root")
    with uproot.open(filename) as f:
        obj = f["Staff"]
        # Anchor fields: seek offsets plus compressed (NBytes) and
        # uncompressed (Len) sizes of the RNTuple header and footer.
        assert obj.member("fVersion") == 0
        assert obj.member("fSize") == 48
        assert obj.member("fSeekHeader") == 854
        assert obj.member("fNBytesHeader") == 537
        assert obj.member("fLenHeader") == 2495
        assert obj.member("fSeekFooter") == 72369
        assert obj.member("fNBytesFooter") == 285
        assert obj.member("fLenFooter") == 804
        assert obj.member("fReserved") == 0

        # Single-range read of the header bytes.
        header_start = obj.member("fSeekHeader")
        header_stop = header_start + obj.member("fNBytesHeader")
        header_chunk = f.file.source.chunk(header_start, header_stop)

        print("HEADER")
        cursor = uproot.Cursor(header_start)
        cursor.debug(header_chunk, 80)
        print("\n")

        # Batched (multi-range) read of header and footer together.
        notifications = queue.Queue()
        footer_start = obj.member("fSeekFooter")
        footer_stop = footer_start + obj.member("fNBytesFooter")
        header_chunk, footer_chunk = f.file.source.chunks(
            [(header_start, header_stop), (footer_start, footer_stop)],
            notifications,
        )

        print("FOOTER")
        cursor = uproot.Cursor(footer_start)
        cursor.debug(footer_chunk, 80)
        print("\n")
# HEADER
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 76 52 1 16 2 0 191 9 0 198 14 105 8 80 63 75 128 117 0 0
# L 4 --- --- --- --- --- --- --- --- --- i --- P ? K --- u --- ---
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 0 0 187 9 0 1 0 144 5 0 0 0 83 116 97 102 102 13 0 255
# --- --- --- --- --- --- --- --- --- --- --- --- S t a f f --- --- ---
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 6 16 0 0 0 117 110 100 101 102 105 110 101 100 32 97 117 116 104 111
# --- --- --- --- --- u n d e f i n e d a u t h o
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 114 0 1 0 4 47 24 0 1 0 3 31 12 12 0 0 4 8 0 110
# r --- --- --- --- / --- --- --- --- --- --- --- --- --- --- --- --- --- n
# FOOTER
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 76 52 1 20 1 0 36 3 0 86 138 213 67 60 183 39 139 27 0 1
# L 4 --- --- --- --- $ --- --- V --- --- C < --- ' --- --- --- ---
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 0 23 1 12 0 23 12 12 0 42 72 0 1 0 47 24 0 1 0 7
# --- --- --- --- --- --- --- --- --- * H --- --- --- / --- --- --- --- ---
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 34 26 13 8 0 34 145 5 8 0 34 213 9 86 0 27 13 84 0 0
# " --- --- --- --- " --- --- --- --- " --- --- V --- --- --- T --- ---
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 1 0 102 52 26 0 0 148 1 124 0 0 16 0 34 102 15 17 0 34
# --- --- f 4 --- --- --- --- --- | --- --- --- --- " f --- --- --- "
|
[
"noreply@github.com"
] |
oshadura.noreply@github.com
|
49127c9d5ec63cf0edbb76c8518ef71b738cb115
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03282/s807999610.py
|
ae207e71f6f810f99b64ea485d43701412aa2740
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
import sys
input = sys.stdin.readline
def main():
S = input().rstrip()
K = int(input())
ans = 1
for k in range(K):
if S[k] != "1":
ans = int(S[k])
break
print(ans)
if __name__ == "__main__":
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
4c0d24e4899cef4d0b65d73aa2aadd5eccbd352e
|
1577e1cf4e89584a125cffb855ca50a9654c6d55
|
/pyobjc/pyobjc/pyobjc-framework-LaunchServices-2.5.1/setup.py
|
69e9ac501f5f908146c2de98a08b37554caf8233
|
[
"MIT"
] |
permissive
|
apple-open-source/macos
|
a4188b5c2ef113d90281d03cd1b14e5ee52ebffb
|
2d2b15f13487673de33297e49f00ef94af743a9a
|
refs/heads/master
| 2023-08-01T11:03:26.870408
| 2023-03-27T00:00:00
| 2023-03-27T00:00:00
| 180,595,052
| 124
| 24
| null | 2022-12-27T14:54:09
| 2019-04-10T14:06:23
| null |
UTF-8
|
Python
| false
| false
| 1,339
|
py
|
'''
Wrappers for the "LaunchServices" framework on MacOSX. The APIs in this
framework enable applications to open other applications or their document
files, similarly to how the Dock or Finder do that.
A number of tasks that can be implemented using this framework:
* Launch or activate applications
* Open documents in other applications
* Identify the preferred application for opening a document
* Register information about the kinds of documents an application
can open (UTI's)
* Obtain information for showing a document (display name, icon, ...)
* Maintain and update the contents of the Recent Items menu.
These wrappers don't include documentation, please check Apple's documentation
for information on how to use this framework and PyObjC's documentation
for general tips and tricks regarding the translation between Python
and (Objective-)C frameworks
NOTE: This wrapper is not complete, this will change in a future version.
'''
from pyobjc_setup import setup
# Package metadata; pyobjc_setup.setup wraps distutils setup with
# pyobjc-specific build defaults shared by all framework wrappers.
setup(
    name='pyobjc-framework-LaunchServices',
    version="2.5.1",
    description = "Wrappers for the framework LaunchServices on Mac OS X",
    packages = [ "LaunchServices" ],
    setup_requires = [
        'pyobjc-core>=2.5.1',
    ],
    install_requires = [
        'pyobjc-core>=2.5.1',
        'pyobjc-framework-Cocoa>=2.5.1',
    ],
)
|
[
"opensource@apple.com"
] |
opensource@apple.com
|
689c4a2a4fbcae5c4efd6838dbe43bd9740fbf9f
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_1/vrmnic005/question3.py
|
1c61ad08760818ce92e029b1eb28c9a08c1f20c6
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
# Mad-libs style generator for a mock "419" advance-fee scam letter
# (course exercise; the letter's quirks are intentional).
name = input("Enter first name: \n")
surname = input("Enter last name: \n")
# SECURITY NOTE(review): eval() executes arbitrary code typed by the user;
# unless expression input is intentional, float(input(...)) would be safer.
money = eval(input("Enter sum of money in USD: \n"))
country = input("Enter country name: \n")
# The promised 30% cut of the sum.
money30 = money*(30/100)
print ()
print ("Dearest ", name,
       "\nIt is with a heavy heart that I inform you of the death of my father,\n"
       "General Fayk ",surname, ", your long lost relative from Mapsfostol.\n"
       "My father left the sum of ", money, "USD for us, your distant cousins.\n"
       "Unfortunately, we cannot access the money as it is in a bank in ", country, ".\n"
       "I desperately need your assistance to access this money.\n"
       "I will even pay you generously, 30% of the amount - ", money30, "USD,\n"
       "for your help. Please get in touch with me at this email address asap.\n"
       "Yours sincerely\n"
       "Frank ", surname,sep ='')
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
45119b2bfb441e6442a560c850e77270bbf09204
|
3b96724c917b3cbbf39a03b42a0c3570cd3714c0
|
/lsstetc.py
|
8ea7790fcc4440ab82fa13614169f62c758a1a7b
|
[] |
no_license
|
wadawson/LSST_ETC
|
44b24737a0515b7d76d74f068c21a66e1ac7f3e6
|
b893ad6c8c162b2b784b67e5e2a6de05cfb1c75b
|
refs/heads/master
| 2020-07-26T04:42:54.654982
| 2015-08-24T23:05:09
| 2015-08-24T23:05:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,640
|
py
|
"""An exposure time calculator for LSST. Uses GalSim to draw a galaxy with specified magnitude,
shape, etc, and then uses the same image as the optimal weight function. Derived from D. Kirkby's
notes on deblending.
"""
import numpy as np
import galsim
# Some constants
# --------------
#
# LSST effective area in meters^2
A = 319/9.6 # etendue / FoV. I *think* this includes vignetting

# zeropoints from DK notes in photons per second per pixel
# should eventually compute these on the fly from filter throughput functions.
s0 = {'u': A*0.732,
      'g': A*2.124,
      'r': A*1.681,
      'i': A*1.249,
      'z': A*0.862,
      'Y': A*0.452}

# Sky brightnesses in AB mag / arcsec^2.
# stole these from http://www.lsst.org/files/docs/gee_137.28.pdf
# should eventually construct a sky SED (varies with the moon phase) and integrate to get these
B = {'u': 22.8,
     'g': 22.2,
     'r': 21.3,
     'i': 20.3,
     'z': 19.1,
     'Y': 18.1}

# number of visits over the survey, per band
# From LSST Science Book
fiducial_nvisits = {'u': 56,
                    'g': 80,
                    'r': 180,
                    'i': 180,
                    'z': 164,
                    'Y': 164}

# exposure time per visit, in seconds
visit_time = 30.0

# Sky brightness per arcsec^2 per second
sbar = {}
for k in B:
    sbar[k] = s0[k] * 10**(-0.4*(B[k]-24.0))

# And some random numbers for drawing (fixed seed for reproducible noise)
bd = galsim.BaseDeviate(1)
class ETC(object):
    """Exposure-time calculator: draws a galaxy profile and uses the
    noiseless image itself as the optimal (matched) weight for the SNR.
    """

    def __init__(self, band, pixel_scale=None, stamp_size=None, threshold=0.0,
                 nvisits=None):
        # pixel_scale: arcsec/pixel; stamp_size: stamp width in pixels;
        # threshold: sigma-sky cut applied to the weight mask in SNR().
        self.pixel_scale = pixel_scale
        self.stamp_size = stamp_size
        self.threshold = threshold
        self.band = band
        if nvisits is None:
            # Fall back to the fiducial full-survey visit count for this band.
            self.exptime = fiducial_nvisits[band] * visit_time
        else:
            self.exptime = nvisits * visit_time
        # Total sky level per pixel over the exposure, and its Poisson sigma.
        self.sky = sbar[band] * self.exptime * self.pixel_scale**2
        self.sigma_sky = np.sqrt(self.sky)
        self.s0 = s0[band]

    def draw(self, profile, mag, noise=False):
        """Draw *profile* at magnitude *mag*; optionally add Gaussian sky noise."""
        img = galsim.ImageD(self.stamp_size, self.stamp_size, scale=self.pixel_scale)
        # Convert AB magnitude to photon flux via the band zeropoint (mag 24).
        flux = self.s0 * 10**(-0.4*(mag - 24.0)) * self.exptime
        profile = profile.withFlux(flux)
        profile.drawImage(image=img)
        if noise:
            gd = galsim.GaussianNoise(bd, sigma=self.sigma_sky)
            img.addNoise(gd)
        return img

    def SNR(self, profile, mag):
        """Matched-filter SNR using the noiseless image as the weight."""
        img = self.draw(profile, mag, noise=False)
        # Only pixels above threshold*sigma_sky contribute to the weight.
        mask = img.array > (self.threshold * self.sigma_sky)
        imgsqr = img.array**2*mask
        signal = imgsqr.sum()
        noise = np.sqrt((imgsqr * self.sky).sum())
        return signal / noise

    def err(self, profile, mag):
        """Magnitude uncertainty implied by the SNR: 2.5/ln(10) / SNR."""
        snr = self.SNR(profile, mag)
        return 2.5 / np.log(10) / snr

    def display(self, profile, mag, noise=True):
        """Render the (optionally noisy) stamp with matplotlib."""
        img = self.draw(profile, mag, noise)
        import matplotlib.pyplot as plt
        import matplotlib.cm as cm
        plt.imshow(img.array, cmap=cm.Greens)
        plt.colorbar()
        plt.show()
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
# Filter
parser.add_argument("--band", default='i',
help="band for simulation (Default 'i')")
# PSF structural arguments
PSF_profile = parser.add_mutually_exclusive_group()
PSF_profile.add_argument("--kolmogorov", action="store_true",
help="Use Kolmogorov PSF (Default Gaussian)")
PSF_profile.add_argument("--moffat", action="store_true",
help="Use Moffat PSF (Default Gaussian)")
parser.add_argument("--PSF_beta", type=float, default=3.0,
help="Set beta parameter of Moffat profile PSF. (Default 2.5)")
parser.add_argument("--PSF_FWHM", type=float, default=0.67,
help="Set FWHM of PSF in arcsec (Default 0.67).")
parser.add_argument("--PSF_phi", type=float, default=0.0,
help="Set position angle of PSF in degrees (Default 0.0).")
parser.add_argument("--PSF_ellip", type=float, default=0.0,
help="Set ellipticity of PSF (Default 0.0)")
# Galaxy structural arguments
parser.add_argument("-n", "--sersic_n", type=float, default=1.0,
help="Sersic index (Default 1.0)")
parser.add_argument("--gal_ellip", type=float, default=0.3,
help="Set ellipticity of galaxy (Default 0.3)")
parser.add_argument("--gal_phi", type=float, default=0.0,
help="Set position angle of galaxy in radians (Default 0.0)")
parser.add_argument("--gal_HLR", type=float, default=0.2,
help="Set galaxy half-light-radius. (default 0.5 arcsec)")
# Simulation input arguments
parser.add_argument("--pixel_scale", type=float, default=0.2,
help="Set pixel scale in arcseconds (Default 0.2)")
parser.add_argument("--stamp_size", type=int, default=31,
help="Set postage stamp size in pixels (Default 31)")
# Magnitude!
parser.add_argument("--mag", type=float, default=25.3,
help="magnitude of galaxy")
# threshold
parser.add_argument("--threshold", type=float, default=0.0,
help="Threshold, in sigma-sky units, above which to include pixels")
# Observation characteristics
parser.add_argument("--nvisits", type=int, default=None)
# draw the image!
parser.add_argument("--display", action='store_true',
help="Display image used to compute SNR.")
args = parser.parse_args()
if args.kolmogorov:
psf = galsim.Kolmogorov(fwhm=args.PSF_FWHM)
elif args.moffat:
psf = galsim.Moffat(fwhm=args.PSF_FWHM, beta=args.PSF_beta)
else:
psf = galsim.Gaussian(fwhm=args.PSF_FWHM)
psf = psf.shear(e=args.PSF_ellip, beta=args.PSF_phi*galsim.radians)
gal = galsim.Sersic(n=args.sersic_n, half_light_radius=args.gal_HLR)
gal = gal.shear(e=args.gal_ellip, beta=args.gal_phi*galsim.radians)
profile = galsim.Convolve(psf, gal)
etc = ETC(args.band, pixel_scale=args.pixel_scale, stamp_size=args.stamp_size,
threshold=args.threshold, nvisits=args.nvisits)
print
print "input"
print "------"
print "band: {}".format(args.band)
print "magnitude: {}".format(args.mag)
print
print "output"
print "------"
print "SNR: {}".format(etc.SNR(profile, args.mag))
print "mag err: {}".format(etc.err(profile, args.mag))
if args.display:
etc.display(profile, args.mag)
|
[
"jmeyers314@gmail.com"
] |
jmeyers314@gmail.com
|
fda72b8c00fbe56089e69e878bc92dd0b2d869cf
|
8a9c26468d352f52e2773ee3d7f97fae25a9f4f2
|
/example/geo_example.py
|
81095e70b182653736e08b698dc9065e7b9ab480
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
Watemlifts/pyecharts
|
76b301a0013cf628e22581b10bfba94e600ba788
|
42c9af85877c812449ad8d3aa942135e95468714
|
refs/heads/master
| 2022-01-04T04:18:40.615309
| 2019-07-02T18:49:02
| 2019-07-02T18:49:02
| 194,917,624
| 1
| 0
|
MIT
| 2021-12-30T19:54:49
| 2019-07-02T18:44:50
|
Python
|
UTF-8
|
Python
| false
| false
| 4,238
|
py
|
from example.commons import Collector, Faker
from pyecharts import options as opts
from pyecharts.charts import Geo, Page
from pyecharts.globals import ChartType, SymbolType
# Registry collecting every example chart function for the final Page render.
C = Collector()


@C.funcs
def geo_base() -> Geo:
    """Geo scatter over Chinese provinces with a continuous visual map."""
    c = (
        Geo()
        .add_schema(maptype="china")
        .add("geo", [list(z) for z in zip(Faker.provinces, Faker.values())])
        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
        .set_global_opts(
            visualmap_opts=opts.VisualMapOpts(),
            title_opts=opts.TitleOpts(title="Geo-基本示例"),
        )
    )
    return c
@C.funcs
def geo_visualmap_piecewise() -> Geo:
    """Same as geo_base but with a piecewise (segmented) visual map legend."""
    c = (
        Geo()
        .add_schema(maptype="china")
        .add("geo", [list(z) for z in zip(Faker.provinces, Faker.values())])
        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
        .set_global_opts(
            visualmap_opts=opts.VisualMapOpts(is_piecewise=True),
            title_opts=opts.TitleOpts(title="Geo-VisualMap(分段型)"),
        )
    )
    return c
@C.funcs
def geo_effectscatter() -> Geo:
    """Geo map using animated ripple-effect scatter points."""
    c = (
        Geo()
        .add_schema(maptype="china")
        .add(
            "geo",
            [list(z) for z in zip(Faker.provinces, Faker.values())],
            type_=ChartType.EFFECT_SCATTER,
        )
        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
        .set_global_opts(title_opts=opts.TitleOpts(title="Geo-EffectScatter"))
    )
    return c
@C.funcs
def geo_heatmap() -> Geo:
    """Geo map rendered as a heatmap with a continuous visual map."""
    c = (
        Geo()
        .add_schema(maptype="china")
        .add(
            "geo",
            [list(z) for z in zip(Faker.provinces, Faker.values())],
            type_=ChartType.HEATMAP,
        )
        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
        .set_global_opts(
            visualmap_opts=opts.VisualMapOpts(),
            title_opts=opts.TitleOpts(title="Geo-HeatMap"),
        )
    )
    return c
@C.funcs
def geo_guangdong() -> Geo:
    """Province-level heatmap restricted to Guangdong's cities."""
    c = (
        Geo()
        .add_schema(maptype="广东")
        .add(
            "geo",
            [list(z) for z in zip(Faker.guangdong_city, Faker.values())],
            type_=ChartType.HEATMAP,
        )
        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
        .set_global_opts(
            visualmap_opts=opts.VisualMapOpts(),
            title_opts=opts.TitleOpts(title="Geo-广东地图"),
        )
    )
    return c
@C.funcs
def geo_lines() -> Geo:
    """Flight-style line map: effect-scatter city markers plus arrowed
    flow lines from Guangzhou to four other cities."""
    c = (
        Geo()
        .add_schema(maptype="china")
        .add(
            "",
            [("广州", 55), ("北京", 66), ("杭州", 77), ("重庆", 88)],
            type_=ChartType.EFFECT_SCATTER,
            color="white",
        )
        .add(
            "geo",
            [("广州", "上海"), ("广州", "北京"), ("广州", "杭州"), ("广州", "重庆")],
            type_=ChartType.LINES,
            effect_opts=opts.EffectOpts(
                symbol=SymbolType.ARROW, symbol_size=6, color="blue"
            ),
            linestyle_opts=opts.LineStyleOpts(curve=0.2),
        )
        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
        .set_global_opts(title_opts=opts.TitleOpts(title="Geo-Lines"))
    )
    return c
@C.funcs
def geo_lines_background() -> Geo:
    """Same as geo_lines but with a dark custom-styled map background."""
    c = (
        Geo()
        .add_schema(
            maptype="china",
            itemstyle_opts=opts.ItemStyleOpts(color="#323c48", border_color="#111"),
        )
        .add(
            "",
            [("广州", 55), ("北京", 66), ("杭州", 77), ("重庆", 88)],
            type_=ChartType.EFFECT_SCATTER,
            color="white",
        )
        .add(
            "geo",
            [("广州", "上海"), ("广州", "北京"), ("广州", "杭州"), ("广州", "重庆")],
            type_=ChartType.LINES,
            effect_opts=opts.EffectOpts(
                symbol=SymbolType.ARROW, symbol_size=6, color="blue"
            ),
            linestyle_opts=opts.LineStyleOpts(curve=0.2),
        )
        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
        .set_global_opts(title_opts=opts.TitleOpts(title="Geo-Lines-background"))
    )
    return c


# Render every collected chart into one HTML page.
Page().add(*[fn() for fn, _ in C.charts]).render()
|
[
"chenjiandongx@qq.com"
] |
chenjiandongx@qq.com
|
2ba1a3b3c29fa6e3f7d79aeb1519606c031c4b7d
|
a9a6b09c53e77c996f552bf48b4625d280044905
|
/utils/annotation.py
|
f5b8223213824777c1f9d9d9bf62656f9cc1d7a8
|
[] |
no_license
|
yanqinghao/AiLab-detectron2
|
8f8f98ae0f3183102c9b9421a4f314c549d5a2d1
|
05d6016ae3f8c397d08eba485b97fd2a25848f3c
|
refs/heads/master
| 2022-11-23T23:21:47.686819
| 2020-04-09T02:25:24
| 2020-04-09T02:25:24
| 229,718,014
| 0
| 0
| null | 2022-11-22T04:19:49
| 2019-12-23T09:17:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,431
|
py
|
import os
import itertools
import numpy as np
from suanpan.utils import image, json
from detectron2.structures import BoxMode
def get_balloon_dicts(img_dir, json_file):
    """Build detectron2-style dataset dicts from a VIA-like annotation JSON.

    Each record carries the image path/size plus one polygon annotation per
    metadata entry (bbox in absolute XYXY, single category id 0).
    NOTE(review): iterating ``set(imagefile)`` makes image_id assignment
    order nondeterministic across runs — confirm this is acceptable.
    """
    imgs_anns = json.load(json_file)
    dataset_dicts = []
    # Metadata keys look like "<name>.jpg<suffix>"; recover the image filename.
    imagefile = [i.split(".jpg")[0] + ".jpg" for i in imgs_anns["metadata"].keys()]
    for idx, v in enumerate(set(imagefile)):
        record = {}
        # All metadata entries that belong to this image.
        indices = [i for i, x in enumerate(imagefile) if x == v]
        filename = os.path.join(img_dir, v)
        height, width = image.read(filename).shape[:2]
        record["file_name"] = filename
        record["image_id"] = idx
        record["height"] = height
        record["width"] = width
        objs = []
        for index in indices:
            data = list(imgs_anns["metadata"].values())[index]
            # xy[0] appears to be a shape-type id; the rest are x,y pairs —
            # TODO confirm against the annotation tool's export format.
            xy = data["xy"][1:]
            px = xy[::2]
            py = xy[1::2]
            # Shift vertices to pixel centers for the segmentation polygon.
            poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
            poly = list(itertools.chain.from_iterable(poly))
            obj = {
                "bbox": [np.min(px), np.min(py), np.max(px), np.max(py)],
                "bbox_mode": BoxMode.XYXY_ABS,
                "segmentation": [poly],
                "category_id": 0,
                "iscrowd": 0,
            }
            objs.append(obj)
        record["annotations"] = objs
        dataset_dicts.append(record)
    return dataset_dicts
|
[
"woshiyanqinghao@gmail.com"
] |
woshiyanqinghao@gmail.com
|
b0b2d646e0da85e1252012bdb744b3cc410bbd8f
|
687a3cc0e531d77e91fbdb27ace757197bc287e3
|
/test/dist_utils.py
|
476e7cecc2456da911283791382cf69d9a887ea7
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
aporter1350/pytorch
|
b4871331e0a225c0cd028bd176e9ebb5ee730921
|
bb1d9b238dbda5da1ba7b65953558fd2deea1f00
|
refs/heads/master
| 2020-09-13T15:18:00.562350
| 2019-11-20T01:21:34
| 2019-11-20T01:24:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,929
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import threading
from functools import partial, wraps
from os import getenv
import torch.distributed as dist
import torch.distributed.rpc as rpc
if not dist.is_available():
    # BUG FIX: ``sys`` was used without being imported anywhere in this
    # module's import block, turning this graceful skip into a NameError.
    import sys

    print("c10d not available, skipping tests")
    sys.exit(0)
class TestConfig:
    """Keyword-only configuration holder.

    ``__slots__`` restricts instances to the declared attributes, so any
    unexpected keyword raises AttributeError on assignment.
    """
    __slots__ = ["rpc_backend_name"]

    def __init__(self, *args, **kwargs):
        assert not args, "TestConfig only takes kwargs."
        for name in kwargs:
            setattr(self, name, kwargs[name])
# Default to the PROCESS_GROUP RPC backend unless overridden via env var.
TEST_CONFIG = TestConfig(rpc_backend_name=getenv("RPC_BACKEND_NAME", "PROCESS_GROUP"))
INIT_METHOD_TEMPLATE = "file://{file_name}"
MASTER_RANK = 0
# Master/follower shutdown handshake state: the master waits until every
# expected worker name has reported done, then raises the termination event.
_ALL_NODE_NAMES = set()
_DONE_NODE_NAMES = set()
_TERMINATION_SIGNAL = threading.Event()
def on_master_follower_report_done(worker_name):
    """Record that *worker_name* finished; fire termination when all have.

    Runs on the master. Each worker must be expected (in _ALL_NODE_NAMES)
    and may report done at most once.
    """
    assert (
        worker_name in _ALL_NODE_NAMES
    ), "{worker_name} is not expected by master.".format(worker_name=worker_name)
    assert (
        worker_name not in _DONE_NODE_NAMES
    ), "{worker_name} report done twice.".format(worker_name=worker_name)
    _DONE_NODE_NAMES.add(worker_name)
    # Keep waiting until every expected worker has reported.
    if _ALL_NODE_NAMES != _DONE_NODE_NAMES:
        return
    set_termination_signal()
def set_termination_signal():
    """Raise the global termination event; must only be invoked once per run."""
    assert not _TERMINATION_SIGNAL.is_set(), "Termination signal got set twice."
    _TERMINATION_SIGNAL.set()
def dist_init(old_test_method=None, setup_rpc=True, clean_shutdown=True):
"""
We use this decorator for setting up and tearing down state since
MultiProcessTestCase runs each `test*` method in a separate process and
each process just runs the `test*` method without actually calling
'setUp' and 'tearDown' methods of unittest.
"""
# If we use dist_init without arguments (ex: @dist_init), old_test_method is
# appropriately set and we return the wrapper appropriately. On the other
# hand if dist_init has arguments (ex: @dist_init(clean_shutdown=False)),
# old_test_method is None and we return a functools.partial which is the real
# decorator that is used and as a result we recursively call dist_init with
# old_test_method and the rest of the arguments appropriately set.
if old_test_method is None:
return partial(
dist_init,
setup_rpc=setup_rpc,
clean_shutdown=clean_shutdown,
)
@wraps(old_test_method)
def new_test_method(self, *arg, **kwargs):
self.worker_id = self.rank
self.worker_name_to_id = {
"worker{}".format(rank): rank for rank in range(self.world_size)
}
if setup_rpc:
global _ALL_NODE_NAMES
_ALL_NODE_NAMES = self.worker_name_to_id.keys()
# Use enough 'num_send_recv_threads' until we fix https://github.com/pytorch/pytorch/issues/26359
rpc.init_rpc(
self_name="worker%d" % self.rank,
backend=self.rpc_backend,
init_method=self.init_method,
self_rank=self.rank,
worker_name_to_id=self.worker_name_to_id,
num_send_recv_threads=16,
)
return_value = old_test_method(self, *arg, **kwargs)
if setup_rpc:
if clean_shutdown:
# Follower reports done.
if self.rank == MASTER_RANK:
on_master_follower_report_done("worker{}".format(MASTER_RANK))
else:
rpc.rpc_async(
"worker{}".format(MASTER_RANK),
on_master_follower_report_done,
args=("worker{}".format(self.rank),),
)
# Master waits for followers to report done.
# Follower waits for master's termination command.
_TERMINATION_SIGNAL.wait()
if self.rank == MASTER_RANK:
# Master sends termination command.
futs = []
for dst_rank in range(self.world_size):
# torch.distributed.rpc module does not support sending to self.
if dst_rank == MASTER_RANK:
continue
dst_name = "worker{}".format(dst_rank)
fut = rpc.rpc_async(dst_name, set_termination_signal, args=())
futs.append(fut)
for fut in futs:
assert fut.wait() is None, "Sending termination signal failed."
# Close RPC. Need to do this even if we don't have a clean shutdown
# since we need to shutdown the RPC agent. If we don't shutdown the
# RPC agent, tests would fail since RPC agent threads, locks and
# condition variables are not properly terminated.
rpc.join_rpc()
return return_value
return new_test_method
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
5648aac33570ad9b8a37e766c09a6bf0f2449f3d
|
ed11f664cbc459c7a4456dd58f2b231edcb22f33
|
/ctm_saas_client/models/variable_names.py
|
6dd582aa1ba63cf9a8ffd6bc9cc44f1eac2484e0
|
[
"BSD-3-Clause"
] |
permissive
|
jpmc216/ctm_python_client
|
c8b8ba60580bf869b3d1e6af9b99737e0a7ea527
|
de44e5012214ec42bb99b7f9b4ebc5394cd14328
|
refs/heads/main
| 2023-08-26T22:06:34.022576
| 2021-10-25T13:41:31
| 2021-10-25T13:41:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,614
|
py
|
# coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.30
Contact: customer_support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ctm_saas_client.configuration import Configuration
class VariableNames(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'variables': 'list[str]'
}
attribute_map = {
'variables': 'variables'
}
def __init__(self, variables=None, _configuration=None): # noqa: E501
"""VariableNames - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._variables = None
self.discriminator = None
if variables is not None:
self.variables = variables
@property
def variables(self):
"""Gets the variables of this VariableNames. # noqa: E501
Array of pool variables in format %%\\\\PoolName\\AUTOVarInPool. HIDDEN. # noqa: E501
:return: The variables of this VariableNames. # noqa: E501
:rtype: list[str]
"""
return self._variables
@variables.setter
def variables(self, variables):
"""Sets the variables of this VariableNames.
Array of pool variables in format %%\\\\PoolName\\AUTOVarInPool. HIDDEN. # noqa: E501
:param variables: The variables of this VariableNames. # noqa: E501
:type: list[str]
"""
self._variables = variables
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(VariableNames, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VariableNames):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, VariableNames):
return True
return self.to_dict() != other.to_dict()
|
[
"cmoraes@bmc.com"
] |
cmoraes@bmc.com
|
f0bc1f5d3eaba5d03fdf3717b4edf8d85c9cf035
|
3649dce8b44c72bbfee56adf4e29ca6c5ba2703a
|
/code_up2721.py
|
95431018d2d23835e6f09154a0044a6cddea5ccd
|
[] |
no_license
|
beOk91/code_up
|
03c7aca76e955e3a59d797299749e7fc2457f24a
|
ca1042ce216cc0a80e9b3d3ad363bc29c4ed7690
|
refs/heads/master
| 2022-12-06T08:23:00.788315
| 2020-08-20T11:21:59
| 2020-08-20T11:21:59
| 284,844,571
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
num1=input()
num2=input()
num3=input()
if num1[len(num1)-1]==num2[0]:
if num2[len(num2)-1]==num3[0]:
if num3[len(num3)-1]==num1[0]:
print("good")
else:
print("bad")
else:
print("bad")
else:
print("bad")
|
[
"be_ok91@naver.com"
] |
be_ok91@naver.com
|
d91bc0db5f2379799c817125ffb8dd2f36f295e9
|
b81668a2cc43654cf6a3ed952d781310876838f9
|
/venv/Lib/site-packages/spacy/tests/regression/test_issue4924.py
|
b240f6d4a49a39413e8609221ea61fab8295918d
|
[] |
no_license
|
gowthamr1999/docbot-1
|
6a8b873407f15035fb8b30b69ed66ded343bd1e4
|
3119958d68e95673b4c9187d58d8cad5c18a6b2c
|
refs/heads/master
| 2023-04-07T02:16:55.574750
| 2021-04-16T02:52:38
| 2021-04-16T02:52:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 152
|
py
|
import pytest
from spacy.language import Language
def test_issue4924():
nlp = Language()
docs_golds = [("", {})]
nlp.evaluate(docs_golds)
|
[
"42891786+kiranm211@users.noreply.github.com"
] |
42891786+kiranm211@users.noreply.github.com
|
e5d323d8c1600cbe6084b699f4ad2b6e07e1d5b5
|
fbc2678b5de0c14a6e417c01168e35a4bb8fe91a
|
/src/translator/setup.py
|
0a3ffc75982ea00c0aa3b59065c56b3726b86356
|
[] |
no_license
|
Kotaimen/sam-lambda-edge-translator
|
16dbfbe7a30eb6b3d1369ce7c297273c2f33cbbb
|
cb95d616d5891a22b43b86f8daf2da240c118e68
|
refs/heads/master
| 2022-12-10T18:17:41.057907
| 2020-02-14T15:13:43
| 2020-02-14T15:13:43
| 240,535,647
| 0
| 0
| null | 2022-12-08T03:36:50
| 2020-02-14T15:11:43
|
Python
|
UTF-8
|
Python
| false
| false
| 270
|
py
|
from setuptools import setup, find_packages
setup(
name="translator",
version="1.0",
packages=find_packages(exclude=["tests.*", "tests"]),
include_package_data=True,
package_data={
# 'package': ['filename']
},
install_requires=[],
)
|
[
"kotaimen.c@gmail.com"
] |
kotaimen.c@gmail.com
|
ca630ab63a475c278ea9ac110b08e4dec53ff4b6
|
2b6b6a1729abd9023736ab1b38704ad38b2efe59
|
/functions-basics-fundamentals/smallest_of_three_numbers.py
|
652f41b443da9a5604796547469f5331614da6ef
|
[] |
no_license
|
DavidStoilkovski/python-fundamentals
|
27fc3381c9ec3f5a792beca8bc778dc32d6ada7a
|
782a2376210e9564265b17db6f610e00ffd99c9c
|
refs/heads/main
| 2023-04-03T10:39:30.762453
| 2021-04-13T06:51:51
| 2021-04-13T06:51:51
| 357,452,730
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
import sys
num_1 = int(input())
num_2 = int(input())
num_3 = int(input())
def small(num_1, num_2, num_3):
smallest_of_all = sys.maxsize
if num_1 <= smallest_of_all:
smallest_of_all = num_1
if num_2 <= smallest_of_all:
smallest_of_all = num_2
if num_3 <= smallest_of_all:
smallest_of_all = num_3
result = smallest_of_all
return result
result = small(num_1, num_2, num_3)
print(result)
|
[
"stoilkovskidavid@gmail.com"
] |
stoilkovskidavid@gmail.com
|
bfc9c5c6d1069c9925eefc45287d32e63063fce8
|
f648c5b25d4df1db47474b6ec57e0aaa6790800a
|
/isso/utils/__init__.py
|
de3be2b13161aaf9d1ce35510ad4ef9dc78c1a08
|
[
"MIT"
] |
permissive
|
waytai/isso
|
e35959eb4fa8c23107ecdf493fd74e6869fcb5a7
|
6d9f43939a5a1407fe8343158493a9b30545a196
|
refs/heads/master
| 2020-04-15T09:48:46.320438
| 2013-11-05T13:15:16
| 2013-11-05T13:33:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,006
|
py
|
# -*- encoding: utf-8 -*-
from __future__ import division
import pkg_resources
werkzeug = pkg_resources.get_distribution("werkzeug")
import json
import random
import hashlib
from string import ascii_letters, digits
from werkzeug.wrappers import Request
from werkzeug.exceptions import BadRequest
import ipaddress
def anonymize(remote_addr):
"""
Anonymize IPv4 and IPv6 :param remote_addr: to /24 (zero'd)
and /48 (zero'd).
>>> anonymize(u'12.34.56.78') # doctest: +IGNORE_UNICODE
'12.34.56.0'
>>> anonymize(u'1234:5678:90ab:cdef:fedc:ba09:8765:4321') # doctest: +IGNORE_UNICODE
'1234:5678:90ab:0000:0000:0000:0000:0000'
"""
try:
ipv4 = ipaddress.IPv4Address(remote_addr)
return u''.join(ipv4.exploded.rsplit('.', 1)[0]) + '.' + '0'
except ipaddress.AddressValueError:
ipv6 = ipaddress.IPv6Address(remote_addr)
if ipv6.ipv4_mapped is not None:
return anonymize(ipv6.ipv4_mapped)
return u'' + ipv6.exploded.rsplit(':', 5)[0] + ':' + ':'.join(['0000']*5)
def salt(value, s=u'\x082@t9*\x17\xad\xc1\x1c\xa5\x98'):
return hashlib.sha1((value + s).encode('utf-8')).hexdigest()
def mksecret(length):
return ''.join(random.choice(ascii_letters + digits) for x in range(length))
class Bloomfilter:
"""A space-efficient probabilistic data structure. False-positive rate:
* 1e-05 for <80 elements
* 1e-04 for <105 elements
* 1e-03 for <142 elements
Uses a 256 byte array (2048 bits) and 11 hash functions. 256 byte because
of space efficiency (array is saved for each comment) and 11 hash functions
because of best overall false-positive rate in that range.
-- via Raymond Hettinger
http://code.activestate.com/recipes/577684-bloom-filter/
"""
def __init__(self, array=bytearray(256), elements=0, iterable=()):
self.array = array
self.elements = elements
self.k = 11
self.m = len(array) * 8
for item in iterable:
self.add(item)
def get_probes(self, key):
h = int(hashlib.sha256(key.encode()).hexdigest(), 16)
for _ in range(self.k):
yield h & self.m - 1
h >>= self.k
def add(self, key):
for i in self.get_probes(key):
self.array[i//8] |= 2 ** (i%8)
self.elements += 1
@property
def density(self):
c = ''.join(format(x, '08b') for x in self.array)
return c.count('1') / len(c)
def __contains__(self, key):
return all(self.array[i//8] & (2 ** (i%8)) for i in self.get_probes(key))
def __len__(self):
return self.elements
class JSONRequest(Request):
if werkzeug.version.startswith("0.8"):
def get_data(self, **kw):
return self.data.decode('utf-8')
def get_json(self):
try:
return json.loads(self.get_data(as_text=True))
except ValueError:
raise BadRequest('Unable to read JSON request')
|
[
"info@posativ.org"
] |
info@posativ.org
|
73b3aa3eb0eafa7c981e958cd9edfcce0db2f3af
|
0049d7959ff872e2ddf6ea3ce83b6c26512425a6
|
/templateProject1/testApp/views.py
|
46d23b84d0f2fdb60a2e8dd2df093a803f548e12
|
[] |
no_license
|
srazor09/Django_projects
|
9806ab25d966af780cdabe652a1792220c7806a8
|
8d664ba4c9478bd93c8e5bcbcaf594e8ffe6ce93
|
refs/heads/master
| 2023-04-18T02:13:15.993393
| 2021-05-04T20:34:05
| 2021-05-04T20:34:05
| 364,379,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
from django.shortcuts import render
import datetime
# Create your views here.
def tempView(request):
date= datetime.datetime.now()
MyDictionary={'date_msg' : date}
return render(request, 'testApp/wish.html',context=MyDictionary)
|
[
"sourabhaws09@gmail.com"
] |
sourabhaws09@gmail.com
|
734ce064902baeb13c6ee7a20c31d6f617d3a987
|
85eff920f0f285abad84c2f6bcfd4f236f3976ab
|
/webservices/migrations/0196_auto_20191106_0835.py
|
d3022e801061abd061dec41ad3ec0334b32d3d5c
|
[] |
no_license
|
obxlifco/Web-Picking-App-GoGrocery
|
8cf5f7924005a19764e5c4722a47bfd963965f2e
|
6b084547bed2af43a67bada313d68e56f4228f96
|
refs/heads/main
| 2023-05-26T08:32:30.297317
| 2021-06-12T10:05:01
| 2021-06-12T10:05:01
| 315,206,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-11-06 08:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('webservices', '0195_auto_20191106_0658'),
]
operations = [
migrations.AddField(
model_name='engageboostshipmentorders',
name='return_delivery_date',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='engageboostshipmentorders',
name='return_driver_id',
field=models.IntegerField(blank=True, null=True),
),
]
|
[
"mjamal@lifcoshop.net"
] |
mjamal@lifcoshop.net
|
b33a52e10e6db1235a8a3804773fe7a1cd6c10f9
|
3873b03ac81354d4ed24e94df5fa8429e726bbd2
|
/titles/9. 回文数.py
|
dce4385d060f8329f8bf8360f90039b94634f4b7
|
[] |
no_license
|
lichangg/myleet
|
27032f115597481b6c0f3bbe3b83e80b34c76365
|
3d5a96d896ede3ea979783b8053487fe44e38969
|
refs/heads/master
| 2023-03-21T15:50:14.128422
| 2021-03-16T09:58:07
| 2021-03-16T09:58:07
| 286,616,721
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
class Solution:
def isPalindrome(self, x: int) -> bool:
s=str(x)
div, mod = divmod(len(s), 2)
if div == 0:
return True
if mod:
left = s[:div]
right = s[div+1:]
else:
left=s[:div]
right = s[div:]
if left == right[::-1]:
return True
else:
return False
# 题目要求不能将数字转为字符串后处理. 所以智能用+-*/反转数字然后做对比
class Solution:
def isPalindrome(self, x: int) -> bool:
if x < 0 or (x % 10 == 0 and x != 0):
return False
revertedNumber = 0
# 此处是将整数反转的方法,学到了
while x > revertedNumber:
revertedNumber = revertedNumber * 10 + x % 10
x //= 10
return x == revertedNumber or x == revertedNumber // 10
|
[
"lcg@ichunt.com"
] |
lcg@ichunt.com
|
93dda13c0501663cb66488d10bd6e44c5d682c67
|
1fdd2c6bb53dd8ddeba28c89ba1b65c692875999
|
/backend/apps/groups/models.py
|
c075f6c0d7e86200642688a5a1560c4cdd47cb09
|
[] |
no_license
|
Alymbekov/test_task_SynergyWay
|
782d0109cd7b63bdf5d0c05603f568da641af3f2
|
4ad07c393af1dec8395dcb754060d130ecea9fa6
|
refs/heads/master
| 2023-08-01T17:09:19.417147
| 2021-01-27T11:09:36
| 2021-01-27T11:09:36
| 254,460,227
| 2
| 1
| null | 2021-09-22T18:52:01
| 2020-04-09T19:26:31
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 318
|
py
|
from django.db import models
#model to group table
class Group(models.Model):
name = models.CharField("Name", max_length=150)
description = models.TextField()
def __str__(self):
return self.name
class Meta:
verbose_name = "Group"
verbose_name_plural = "Groups"
|
[
"maxim.makarov.1997@mail.ru"
] |
maxim.makarov.1997@mail.ru
|
8793c6db98cfca73fc7b88ee015d243cd56599de
|
47343c9191f7fcfefae38b2d8160d39ba9410271
|
/O06triplets.py
|
67b46722768b72fb8ede85e85843143df15a335e
|
[] |
no_license
|
naveenameganathan/python3
|
01f7c06e48559693b1f132a8223ad9f9855e8a1f
|
6bff6f16de0a03dd36bedec140935c3af56b983f
|
refs/heads/master
| 2020-05-23T02:17:31.986878
| 2019-07-25T17:36:39
| 2019-07-25T17:36:39
| 186,600,896
| 1
| 4
| null | 2019-10-03T15:15:09
| 2019-05-14T10:35:52
|
Python
|
UTF-8
|
Python
| false
| false
| 195
|
py
|
p = int(input())
q = list(map(int,input().split()))
c = 0
for i in range(p):
for j in range(i,p):
for k in range(j,p):
if q[i]<q[j]<q[k]:
c+=1
print(c)
|
[
"noreply@github.com"
] |
naveenameganathan.noreply@github.com
|
0c33e9bbe8b36c3a0676bca201898fcecad7e191
|
e36225e61d95adfabfd4ac3111ec7631d9efadb7
|
/problems/CR/auto/problem220_CR.py
|
b03a0e4f5f53ef2573a1700d9c1d7cd085e3998f
|
[
"BSD-3-Clause"
] |
permissive
|
sunandita/ICAPS_Summer_School_RAE_2020
|
d2ab6be94ac508e227624040283e8cc6a37651f1
|
a496b62185bcfdd2c76eb7986ae99cfa85708d28
|
refs/heads/main
| 2023-01-01T02:06:40.848068
| 2020-10-15T17:25:01
| 2020-10-15T17:25:01
| 301,263,711
| 5
| 2
|
BSD-3-Clause
| 2020-10-15T17:25:03
| 2020-10-05T01:24:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,071
|
py
|
__author__ = 'patras'
from domain_chargeableRobot import *
from timer import DURATION
from state import state
DURATION.TIME = {
'put': 2,
'take': 2,
'perceive': 2,
'charge': 2,
'move': 2,
'moveToEmergency': 2,
'moveCharger': 2,
'addressEmergency': 2,
'wait': 2,
}
DURATION.COUNTER = {
'put': 2,
'take': 2,
'perceive': 2,
'charge': 2,
'move': 2,
'moveToEmergency': 2,
'moveCharger': 2,
'addressEmergency': 2,
'wait': 2,
}
rv.LOCATIONS = [1, 2, 3, 4, 5, 6, 7, 8]
rv.EDGES = {1: [7], 2: [8], 3: [8], 4: [8], 5: [7], 6: [7], 7: [1, 5, 6, 8], 8: [2, 3, 4, 7]}
rv.OBJECTS=['o1']
rv.ROBOTS=['r1']
def ResetState():
state.loc = {'r1': 2}
state.charge = {'r1': 3}
state.load = {'r1': NIL}
state.pos = {'c1': 1, 'o1': UNK}
state.containers = { 1:[],2:['o1'],3:[],4:[],5:[],6:[],7:[],8:[],}
state.emergencyHandling = {'r1': False, 'r2': False}
state.view = {}
for l in rv.LOCATIONS:
state.view[l] = False
tasks = {
4: [['fetch', 'r1', 'o1']],
}
eventsEnv = {
}
|
[
"sunandita.patra@gmail.com"
] |
sunandita.patra@gmail.com
|
94ad35991846e7a87d4132dc62e43fc41748359e
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/edifact/D12A/DOCARED12AUN.py
|
7112f56f43c44df6e1ef69c397d993862bf477cf
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
#Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD12AUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'RFF', MIN: 1, MAX: 2},
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'FII', MIN: 1, MAX: 5, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 2},
{ID: 'CTA', MIN: 0, MAX: 1},
{ID: 'COM', MIN: 0, MAX: 5},
]},
{ID: 'NAD', MIN: 1, MAX: 9, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 1},
{ID: 'CTA', MIN: 0, MAX: 1},
{ID: 'COM', MIN: 0, MAX: 5},
]},
{ID: 'AUT', MIN: 0, MAX: 1, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
5a0514166417cfcf07b4e5bd79d4dc3fefe912d2
|
4bb31b4cb7b1872933d73dc15b5a2569a70bfad5
|
/marktex/marktex.py
|
58772ce680b2e33239a2c29d08cef2eb5eb16736
|
[] |
no_license
|
chthub/MarkTex
|
bf27aa402d5bd75d27d6a8b2cf5df19b9035a3d7
|
e998a03a5e607524c14bf8ea1c1f79c34e2110f8
|
refs/heads/master
| 2020-07-04T03:27:25.095562
| 2019-08-02T02:24:30
| 2019-08-02T02:24:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,788
|
py
|
import argparse,sys,os
APP_DESC="""
MarkTex is used to convert markdown document into tex format.
输出位置可以选择:
- 在各自的md文件下 default,最低优先级
- 统一输出到一个目录下 -o "path" ,第二优先级
- 在各自给定的目录下 -e "",优先级最高
输出到对应文件的 "文件名" 所在的目录下:
marktex a.md b.md ...
输出到一个同一的文件夹下:
marktex a.md b.md ... -o "path"
指定输出到各自文件夹,必须保证路径个数和文件个数相同:
marktex a.md b.md ... -e "pathfora" "pathforb" ...
"""
if len(sys.argv) == 1:
sys.argv.append('--help')
parser = argparse.ArgumentParser()
parser.add_argument('mdfiles', metavar='mdfiles', type=str, nargs='+',
help='place markdown path')
parser.add_argument('-o','--output',type=str,default=None,help="指定统一路径")
parser.add_argument('-e','--every',help="为每个文件分配路径",nargs="*")
args = parser.parse_args()
every = args.every
mdfiles = args.mdfiles
output = args.output
output_paths = []
if every is not None:
if len(every) != len(mdfiles):
print("you ues -e option, the number of outputdirs must be equal to markdown files.")
exit(1)
output_paths = every
elif output is not None:
output_paths = [output]*len(mdfiles)
else:
for mdfile in mdfiles:
mdfile = os.path.abspath(mdfile)
mdpath,fname = os.path.splitext(mdfile)
output_paths.append(mdpath)
from marktex.texrender.toTex import MarkTex
for mdfile,opath in zip(mdfiles,output_paths):
_,fname = os.path.split(mdfile)
fpre,_ = os.path.splitext(fname)
doc = MarkTex.convert_file(mdfile,opath)
doc.generate_tex(fpre)
print(f"[info*]convert finished.")
exit(0)
|
[
"sailist@outlook.com"
] |
sailist@outlook.com
|
83a957a1f924f564aa7c20dbadd7d44be83ce692
|
255021fadf9f739db042809ca95f5b9f75609ec5
|
/test_3/프로그래밍1.py
|
f14fdf6f552ef6ddf444a58ca15a9d0876f43729
|
[] |
no_license
|
unsung107/Algorithm_study
|
13bfff518fc1bd0e7a020bb006c88375c9ccacb2
|
fb3b8563bae7640c52dbe9324d329ca9ee981493
|
refs/heads/master
| 2022-12-13T02:10:31.173333
| 2020-09-13T11:32:10
| 2020-09-13T11:32:10
| 295,137,458
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
M, C = map(int,input().split())
messages = []
for _ in range(M):
messages.append(int(input()))
consumers = [[False] * 1001 for _ in range(C)]
max_idx = 0
for idx in range(1, 1001):
for c in range(C):
if not consumers[c][idx]:
gap = messages.pop(0)
consumers[c][idx: idx + gap] = [True] * gap
if idx + gap - 1 > max_idx:
max_idx = idx + gap - 1
if not messages:
break
if not messages:
break
print(max_idx)
|
[
"unsung102@naver.com"
] |
unsung102@naver.com
|
ee2d685b30f2cc02ae06dc9f61a71fb82adf6363
|
6066b2af4b4f6ab967cfb8af8ec3b8ee68545ab9
|
/nyenyenye/main.py
|
fb4d33da1a846dda10914eb8bab04e0886a20720
|
[] |
no_license
|
zsbati/PycharmProjects
|
7b29b210b4878af42baf288c585675d0203b9805
|
c13b05901c5ff8ea6fc7bcb61c70aa40940daa56
|
refs/heads/main
| 2023-09-04T09:01:19.315655
| 2021-10-24T16:24:43
| 2021-10-24T16:24:43
| 401,172,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
# import string library function
import string
# Storing the value in variable result
result = string.digits
# Printing the value
print(help(string.digits))
'''print("I am = I'm")
print("I have = I've")
print("I have = I've")
print("I had / would = I'd")'''
|
[
"zbati123@gmail.com"
] |
zbati123@gmail.com
|
c0894817b359f565a61787e0b5398c3034bd645b
|
c90b3ac3e5ad11cb93d4e6b76b9b9c4a19d0f512
|
/.history/test_20200506092929.py
|
e9356e56f63da20a4420f3d3da4141f231c536e3
|
[] |
no_license
|
rbafna6507/passwordstorageproject
|
6465585e36c81075856af8d565fe83e358b4a40a
|
480c30e358f7902ac0ef5c4e8d9556cb1d6d33f4
|
refs/heads/master
| 2022-11-25T12:05:02.625968
| 2020-07-27T21:33:38
| 2020-07-27T21:33:38
| 283,021,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
import pickle
import cryptography
from cryptography.fernet import Fernet
infile = open('pass.pkl','rb')
j = pickle.load(infile)
print(j)
delpass = input("Password to delete")
if "Website: " + delpass in j:
del j["Website: " + delpass]
outfile = open("test.pkl", "wb")
pickle.dump(j, outfile)
outfile.close()
infile = open('test.pkl','rb')
j = pickle.load(infile)
print(j)
else:
print("NOPE")
|
[
"35872545+rbafna6507@users.noreply.github.com"
] |
35872545+rbafna6507@users.noreply.github.com
|
fe9d900ddf672433c7aab61886618fa4fbecd0ef
|
e3bb7c26a8bcdc9a241feaa8d1c7b4edf936ec21
|
/mini服务器/装饰器/01.无参数无返回值的装饰器.py
|
af894c7d4b84efbf7dc5e4fa1c986c93ddbb9665
|
[] |
no_license
|
hezudao25/learnpython
|
a3b797caf72017c16455ed014824fe6cd0fdb36d
|
1a9dbe02ac442ab8d2077a002e6a635c58bbce04
|
refs/heads/master
| 2020-04-24T07:44:34.664354
| 2019-07-26T05:50:05
| 2019-07-26T05:50:05
| 171,807,874
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
import time
def set_func(func):
def call_func():
start_time = time.time()
func()
stop_time = time.time()
print("alltimeis %f" % (stop_time - start_time))
return call_func
@set_func # 等价于 test1 = set_func(test1)
def test1():
print("----test1-----")
for i in range(10000):
pass
#test1 = set_func(test1)
test1()
#test1()
|
[
"hezudao@msn.cn"
] |
hezudao@msn.cn
|
f9ee0e4095188a538b77e1bf3334f8d98d7da304
|
552ba1ede64e20980227e70c2e7fe2ee9c5f6a33
|
/tiny_tf/transformer.py
|
47a90c0c5bb0a861c6b97626d45ea30ca68c8398
|
[
"MIT"
] |
permissive
|
felixvd/tiny_tf
|
b7f5f3a573bbcd1ac15082e9fb0c53277aaf3a1a
|
d8f2c75e0da935450c4f7be1c87f79b2a75d1278
|
refs/heads/master
| 2020-05-16T03:19:38.460245
| 2019-09-04T07:45:54
| 2019-09-04T07:45:54
| 182,678,256
| 0
| 0
| null | 2019-04-22T08:48:31
| 2019-04-22T08:48:31
| null |
UTF-8
|
Python
| false
| false
| 2,956
|
py
|
from .tf import *
import numpy as np
from . import transformations as tft
from collections import namedtuple
from . import geometry_msgs
class Transformer(TFTree):
"""
This class implements the same interfaces as the ROS tf.TransformListener().
"""
def __init__(self):
super(Transformer, self).__init__()
def setTransform(self, transform_stamped):
"""
For geometry_msgs.msg.TransformStamped
"""
xform = Transform(transform_stamped.transform.translation.x,
transform_stamped.transform.translation.y,
transform_stamped.transform.translation.z,
transform_stamped.transform.rotation.x,
transform_stamped.transform.rotation.y,
transform_stamped.transform.rotation.z,
transform_stamped.transform.rotation.w)
parent = transform_stamped.header.frame_id
child = transform_stamped.child_frame_id
self.add_transform(parent, child, xform)
def transformPoint(self, target_frame, point_stamped):
"""
point_stamped is a geometry_msgs.msg.PointStamped object.
Returns a PointStamped transformed to target_frame.
"""
t = self.lookup_transform(point_stamped.header.frame_id, target_frame)
p = self.transform_point(point_stamped.point.x, point_stamped.point.y, point_stamped.point.z, target_frame, point_stamped.header.frame_id)
ps_out = geometry_msgs.msg.PointStamped()
ps_out.header.frame_id = target_frame
ps_out.point.x = p[0]
ps_out.point.y = p[1]
ps_out.point.z = p[2]
return ps_out
def transformPose(self, target_frame, pose_stamped):
"""
pose_stamped is a geometry_msgs.msg.PoseStamped object
Returns a PoseStamped transformed to target_frame.
"""
t = self.lookup_transform(pose_stamped.header.frame_id, target_frame)
p = self.transform_pose(pose_stamped.pose.position.x, pose_stamped.pose.position.y, pose_stamped.pose.position.z,
pose_stamped.pose.orientation.x, pose_stamped.pose.orientation.y, pose_stamped.pose.orientation.z, pose_stamped.pose.orientation.w,
target_frame, pose_stamped.header.frame_id)
ps_out = geometry_msgs.msg.PoseStamped()
ps_out.header.frame_id = target_frame
ps_out.pose.position.x = p[0]
ps_out.pose.position.y = p[1]
ps_out.pose.position.z = p[2]
ps_out.pose.orientation.x = p[3]
ps_out.pose.orientation.y = p[4]
ps_out.pose.orientation.z = p[5]
ps_out.pose.orientation.w = p[6]
return ps_out
def lookupTransform(self, base_frame, target_frame):
"""
Returns a TransformStamped from base_frame to target_frame
"""
# TODO
# t = geometry_msgs.msg.TransformStamped()
return self.lookup_transform(base_frame, target_frame)
|
[
"FvDrigalski@gmail.com"
] |
FvDrigalski@gmail.com
|
a2c508ad7151b2721bd977a375212ace036c9aee
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/matrix-org_synapse/synapse-master/synapse/util/distributor.py
|
e68f94ce77728d0cc5352cc5c70b8de90ef915b5
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 4,894
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from synapse.util.logcontext import (
PreserveLoggingContext, preserve_context_over_fn
)
from synapse.util import unwrapFirstError
import logging
logger = logging.getLogger(__name__)
def user_left_room(distributor, user, room_id):
return preserve_context_over_fn(
distributor.fire,
"user_left_room", user=user, room_id=room_id
)
def user_joined_room(distributor, user, room_id):
return preserve_context_over_fn(
distributor.fire,
"user_joined_room", user=user, room_id=room_id
)
class Distributor(object):
"""A central dispatch point for loosely-connected pieces of code to
register, observe, and fire signals.
Signals are named simply by strings.
TODO(paul): It would be nice to give signals stronger object identities,
so we can attach metadata, docstrings, detect typoes, etc... But this
model will do for today.
"""
def __init__(self, suppress_failures=True):
self.suppress_failures = suppress_failures
self.signals = {}
self.pre_registration = {}
def declare(self, name):
if name in self.signals:
raise KeyError("%r already has a signal named %s" % (self, name))
self.signals[name] = Signal(
name,
suppress_failures=self.suppress_failures,
)
if name in self.pre_registration:
signal = self.signals[name]
for observer in self.pre_registration[name]:
signal.observe(observer)
def observe(self, name, observer):
if name in self.signals:
self.signals[name].observe(observer)
else:
# TODO: Avoid strong ordering dependency by allowing people to
# pre-register observations on signals that don't exist yet.
if name not in self.pre_registration:
self.pre_registration[name] = []
self.pre_registration[name].append(observer)
def fire(self, name, *args, **kwargs):
if name not in self.signals:
raise KeyError("%r does not have a signal named %s" % (self, name))
return self.signals[name].fire(*args, **kwargs)
class Signal(object):
    """A Signal is a dispatch point that stores a list of callables as
    observers of it.

    Signals can be "fired", meaning that every callable observing it is
    invoked. Firing a signal does not change its state; it can be fired again
    at any later point. Firing a signal passes any arguments from the fire
    method into all of the observers.
    """

    def __init__(self, name, suppress_failures):
        # name: label used in log messages and repr().
        # suppress_failures: when True, observer errors are logged and
        # swallowed; when False they propagate out of fire().
        self.name = name
        self.suppress_failures = suppress_failures
        self.observers = []

    def observe(self, observer):
        """Adds a new callable to the observer list which will be invoked by
        the 'fire' method.

        Each observer callable may return a Deferred."""
        self.observers.append(observer)

    @defer.inlineCallbacks
    def fire(self, *args, **kwargs):
        """Invokes every callable in the observer list, passing in the args and
        kwargs. Exceptions thrown by observers are logged but ignored. It is
        not an error to fire a signal with no observers.

        Returns a Deferred that will complete when all the observers have
        completed."""
        def do(observer):
            # Errback attached to each observer: log the failure, and only
            # propagate it when failures are not being suppressed.
            def eb(failure):
                logger.warning(
                    "%s signal observer %s failed: %r",
                    self.name, observer, failure,
                    exc_info=(
                        failure.type,
                        failure.value,
                        failure.getTracebackObject()))
                if not self.suppress_failures:
                    return failure
            # maybeDeferred lets plain callables and Deferred-returning
            # callables be treated uniformly.
            return defer.maybeDeferred(observer, *args, **kwargs).addErrback(eb)

        with PreserveLoggingContext():
            deferreds = [
                do(observer)
                for observer in self.observers
            ]

            # consumeErrors=True so each failure is reported once; the first
            # failure (if any) is unwrapped into the result.
            res = yield defer.gatherResults(
                deferreds, consumeErrors=True
            ).addErrback(unwrapFirstError)

        defer.returnValue(res)

    def __repr__(self):
        return "<Signal name=%r>" % (self.name,)
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
f60c32358ba29e0c6f5f181e16c9a9be15c3a970
|
7275f7454ce7c3ce519aba81b3c99994d81a56d3
|
/sp1/python数据采集/数据采集基础/数据采集基本操作.py
|
a785c16fafcc2c6f8a703fe2c97dc5fc18c6eb6d
|
[] |
no_license
|
chengqiangaoci/back
|
b4c964b17fb4b9e97ab7bf0e607bdc13e2724f06
|
a26da4e4f088afb57c4122eedb0cd42bb3052b16
|
refs/heads/master
| 2020-03-22T08:36:48.360430
| 2018-08-10T03:53:55
| 2018-08-10T03:53:55
| 139,777,994
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,274
|
py
|
import requests
from bs4 import BeautifulSoup

# -- Basic fetch --
# url = "http://www.pythonscraping.com/pages/page1.html"
# response = requests.get(url)
# soup = BeautifulSoup(response.text,"html.parser")
# print(soup)

# -- Selecting by tag/class --
# url = "http://www.pythonscraping.com/pages/warandpeace.html"
# response = requests.get(url)
# soup = BeautifulSoup(response.text,"html.parser")
# namelist = soup.find_all("span",{"class":"green"})
# print(namelist.text)

# -- Extracting link hrefs --
# url = "https://en.wikipedia.org/wiki/Kevin_Bacon"
# response = requests.get(url)
# soup = BeautifulSoup(response.text,"html.parser")
# for link in soup.find_all("a"): #a tags
#     if "href" in link.attrs:
#         print(link.attrs["href"])

# -- Children and descendants --
# url = "http://www.pythonscraping.com/pages/page3.html"
# response = requests.get(url)
# soup = BeautifulSoup(response.text,"html.parser")
# testlist = soup.find_all("table",{"id":"giftList"})
# for list in testlist:
#     print(list.get_text())  # text only, without tags

# -- Regular expressions --
import re

url = "http://www.pythonscraping.com/pages/page3.html"
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
# Bug fix: the original pattern was a non-raw string ("\.\.\/img\/...");
# "\." and "\/" are invalid escape sequences in Python 3 string literals.
# Use a raw string, and note '/' needs no escaping in a regex.
images = soup.find_all("img", {"src": re.compile(r"\.\./img/gifts/img.*\.jpg")})
for image in images:
    print(image["src"])
|
[
"2395618655@qq.com"
] |
2395618655@qq.com
|
f90e35118bc900eb7fd7bae46ae226fb1c0c5d5b
|
18fd2d9e1d191fef2f5f91150e02e28968c2e648
|
/acousticsim/analysis/praat/wrapper.py
|
281cac24338bbe9c2b0d64f529a1ec4c09a2873d
|
[
"MIT"
] |
permissive
|
JoFrhwld/python-acoustic-similarity
|
8f69366f1d8d019d7a6e8ebc489f54817f9640a3
|
50f71835532010b2fedf14b0ca3a52d88a9ab380
|
refs/heads/master
| 2021-01-21T12:49:36.635149
| 2017-05-15T23:38:28
| 2017-05-15T23:38:28
| 91,800,742
| 5
| 2
| null | 2017-05-19T11:50:00
| 2017-05-19T11:50:00
| null |
UTF-8
|
Python
| false
| false
| 1,721
|
py
|
import os
from subprocess import Popen, PIPE
import re
from acousticsim.exceptions import AcousticSimPraatError
def run_script(praat_path, script_path, *args):
com = [praat_path]
if praat_path.endswith('con.exe'):
com += ['-a']
com +=[script_path] + list(map(str,args))
err = ''
text = ''
with Popen(com, stdout=PIPE, stderr=PIPE, stdin=PIPE) as p:
try:
text = str(p.stdout.read().decode('latin'))
err = str(p.stderr.read().decode('latin'))
except UnicodeDecodeError:
print(p.stdout.read())
print(p.stderr.read())
if (err and not err.strip().startswith('Warning')) or not text:
print(args)
raise(AcousticSimPraatError(err))
return text
def read_praat_out(text):
    """Parse Praat tabular output into ``{time: {measure: value}}``.

    Returns None for empty input.  Columns whose cell is '--undefined--'
    are stored as 0; unparseable cells are kept as strings (after being
    dumped for debugging).
    """
    if not text:
        return None

    lines = text.splitlines()

    # Discard everything up to the header row, which starts with "time".
    header = None
    while header is None:
        try:
            row = lines.pop(0)
        except IndexError:
            print(text)
            raise
        if row.startswith('time'):
            # Drop unit suffixes such as "(Hz)" from the column names; the
            # leading "time" column is not part of the header.
            header = re.sub('[(]\w+[)]', '', row).split("\t")[1:]

    output = {}
    for row in lines:
        if '\t' not in row:
            continue
        cells = row.split("\t")
        time = cells.pop(0)
        values = {}
        for idx, cell in enumerate(cells):
            if cell == '--undefined--':
                value = 0
            else:
                try:
                    value = float(cell)
                except ValueError:
                    print(text)
                    print(header)
                    value = cell
            values[header[idx]] = value
        if values:
            output[float(time)] = values
    return output
|
[
"michael.e.mcauliffe@gmail.com"
] |
michael.e.mcauliffe@gmail.com
|
28f9e049ec91d5c2e0ec93c0191bb5cc0c0a637a
|
d52413173437ba73ecdf822ca895e659f00a8ce7
|
/kiwibackend/doc/python/PBMailOperationMessage_PBRequest.py
|
f070f6913cf1cd1e944af7681f07a4ce67799865
|
[] |
no_license
|
whiteprism/mywork
|
2329b3459c967c079d6185c5acabd6df80cab8ea
|
a8e568e89744ca7acbc59e4744aff2a0756d7252
|
refs/heads/master
| 2021-01-21T11:15:49.090408
| 2017-03-31T03:28:13
| 2017-03-31T03:28:13
| 83,540,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
class PBMailOperationMessage_PBRequest():
    """Plain data holder for a PBMailOperationMessage request.

    Scalar fields start at -1 (unset marker); ``ids`` starts empty.
    """

    def __init__(self):
        # Fresh list per instance so requests never share an ids buffer.
        self.ids = []
        self.category = -1
        self._type = -1
|
[
"snoster@163.com"
] |
snoster@163.com
|
254cfb11f499f91c88e065d67f9d232f7e5373f5
|
e3372811e34edd1f8d79b2a858c5c92c3e6ef187
|
/tools/infer_simple.py
|
b700782f5aa69a983dbdb8ddf3edb90dff12e94e
|
[] |
no_license
|
zhangjunyi1225054736/--object-detection
|
632481351246acaef6b0cc6aa71962c318d46a8a
|
d99f6f57cdb457ec3f2df489addfde43ab2910fc
|
refs/heads/master
| 2020-05-29T21:22:28.211748
| 2019-06-06T13:08:52
| 2019-06-06T13:08:52
| 189,377,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,301
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import distutils.util
import os
import sys
import pprint
import subprocess
from collections import defaultdict
from six.moves import xrange
# Use a non-interactive backend
import matplotlib
matplotlib.use('Agg')
import numpy as np
import cv2
import torch
import torch.nn as nn
from torch.autograd import Variable
import _init_paths
import nn as mynn
from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg
from core.test import im_detect_all
from modeling.model_builder import Generalized_RCNN
import datasets.dummy_datasets as datasets
import utils.misc as misc_utils
import utils.net as net_utils
import utils.vis as vis_utils
from utils.detectron_weight_helper import load_detectron_weight
from utils.timer import Timer
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
import json
source_365 = "/mnt/md126/zhangjunyi/365-object-detection/objects365_json/objects365_Tiny_val.json"
result = []
def write_to_json(cls_boxes, image_name):
    """Convert one image's detections into COCO-style result dicts.

    Appends one dict per detection to the module-level ``result`` list
    (a shared accumulator that is also returned).

    :param cls_boxes: per-class lists of [x1, y1, x2, y2, score] rows;
        index 0 is skipped (presumably the background class -- confirm).
    :param image_name: file name whose third '_'-separated token (minus
        extension) is parsed as the integer image id.
    :return: the module-level ``result`` list.
    """
    image_id = image_name.split("_")[2].split(".")[0]
    image_id = int(image_id)
    # Re-reads the Objects365 annotation file on every call to recover the
    # category-id ordering.  NOTE(review): hoisting this parse out of the
    # per-image loop would avoid re-reading the JSON each time.
    with open(source_365, 'r') as f:
        data = json.load(f)
    categories = data["categories"]
    id_list = []
    for i in categories:
        category_id = i["id"]
        id_list.append(category_id)
    # Class index j in cls_boxes maps to the (j-1)-th category id.
    for j in range(1, len(cls_boxes)):
        category_id = id_list[j-1]
        if len(cls_boxes[j]) != 0:
            for line in cls_boxes[j]:
                d = {}
                # Convert [x1, y1, x2, y2] corners to COCO [x, y, w, h].
                x = round(float(line[0]), 1)
                y = round(float(line[1]), 1)
                w = round((float(line[2]) - float(line[0])), 1)
                h = round((float(line[3]) - float(line[1])), 1)
                bbox = [x, y, w, h]
                score = round(float(line[4]), 2)
                d["image_id"] = image_id
                d["category_id"] = category_id
                d["bbox"] = bbox
                d["score"] = score
                result.append(d)
        else:
            pass
    return result
def parse_args():
    """Build and evaluate the command-line interface of the demo script."""
    parser = argparse.ArgumentParser(description='Demonstrate mask-rcnn results')
    add = parser.add_argument

    add('--dataset', required=True, help='training dataset')
    add('--cfg', dest='cfg_file', required=True, help='optional config file')
    add('--set', dest='set_cfgs', default=[], nargs='+',
        help='set config keys, will overwrite config in the cfg_file')
    add('--no_cuda', dest='cuda', action='store_false', help='whether use CUDA')
    add('--load_ckpt', help='path of checkpoint to load')
    add('--load_detectron', help='path to the detectron weight pickle file')
    add('--image_dir', help='directory to load images for demo')
    add('--images', nargs='+', help='images to infer. Must not use with --image_dir')
    add('--output_dir', default="infer_outputs", help='directory to save demo results')
    add('--merge_pdfs', type=distutils.util.strtobool, default=True)

    return parser.parse_args()
def main():
    """Run Mask R-CNN inference over a fixed Objects365/VOC image list and
    dump all detections to ``result.json`` (COCO result format)."""
    if not torch.cuda.is_available():
        sys.exit("Need a CUDA device to run the code.")

    args = parse_args()
    print('Called with args:')
    print(args)

    # Exactly one of --image_dir / --images must be given.
    assert args.image_dir or args.images
    assert bool(args.image_dir) ^ bool(args.images)

    if args.dataset.startswith("coco"):
        dataset = datasets.get_coco_dataset()
        # NOTE(review): 66 classes here, despite the COCO dataset helper --
        # presumably tuned for the Objects365-Tiny label set; confirm.
        cfg.MODEL.NUM_CLASSES = 66
    elif args.dataset.startswith("keypoints_coco"):
        dataset = datasets.get_coco_dataset()
        cfg.MODEL.NUM_CLASSES = 2
    else:
        raise ValueError('Unexpected dataset name: {}'.format(args.dataset))

    print('load cfg from file: {}'.format(args.cfg_file))
    cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    assert bool(args.load_ckpt) ^ bool(args.load_detectron), \
        'Exactly one of --load_ckpt and --load_detectron should be specified.'
    cfg.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS = False  # Don't need to load imagenet pretrained weights
    assert_and_infer_cfg()

    maskRCNN = Generalized_RCNN()

    if args.cuda:
        maskRCNN.cuda()

    # Load weights from either a training checkpoint or a Detectron pickle.
    if args.load_ckpt:
        load_name = args.load_ckpt
        print("loading checkpoint %s" % (load_name))
        checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
        net_utils.load_ckpt(maskRCNN, checkpoint['model'])

    if args.load_detectron:
        print("loading detectron weights %s" % args.load_detectron)
        load_detectron_weight(maskRCNN, args.load_detectron)

    maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'],
                                 minibatch=True, device_ids=[0])  # only support single GPU

    maskRCNN.eval()

    if args.image_dir:
        imglist = misc_utils.get_imagelist_from_dir(args.image_dir)
    else:
        imglist = args.images
    num_images = len(imglist)
    print("num_images:", num_images)
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # NOTE(review): hard-coded dataset paths below override --image_dir /
    # --images for the actual inference loop; parameterize before reuse.
    # The file handle is never closed.
    img_list = open("/mnt/md126/zhangjunyi/365-object-detection/VOC2007/ImageSets/Main/test.txt")
    lines = img_list.readlines()
    for i in xrange(len(lines)):
        print('img', i)
        path_dir = "/mnt/md126/zhangjunyi/365-object-detection/VOC2007/JPEGImages/"
        print(lines[i].strip())
        im = cv2.imread(path_dir+lines[i].strip())
        assert im is not None

        timers = defaultdict(Timer)

        cls_boxes, cls_segms, cls_keyps = im_detect_all(maskRCNN, im, timers=timers)
        write_to_json(cls_boxes, lines[i].strip())
        # Rewrites the full accumulated `result` list after every image, so a
        # partial run still leaves a usable result.json behind.
        f_json = open('result.json','w',encoding='utf-8')
        str_json=json.dump(result,f_json)
        #print("cls_boxes:", np.array(cls_boxes)[1].shape)
        #im_name, _ = os.path.splitext(os.path.basename(imglist[i]))
        '''
        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2
        )
        '''

    # Optionally merge the per-image PDF visualizations into one file.
    if args.merge_pdfs and num_images > 1:
        merge_out_path = '{}/results.pdf'.format(args.output_dir)
        if os.path.exists(merge_out_path):
            os.remove(merge_out_path)
        command = "pdfunite {}/*.pdf {}".format(args.output_dir,
                                                merge_out_path)
        subprocess.call(command, shell=True)
# Script entry point: run inference only when executed directly.
if __name__ == '__main__':
    main()
|
[
"1225054736@qq.com"
] |
1225054736@qq.com
|
ff3b8c788d9f8fb092eb5e9315de1af5c03c17ca
|
c259bd9e4a570a1fa37949655530d778e5f5c46d
|
/mysite/.history/api/views_20211014221913.py
|
04b9cb96204ba7896ab0e5bc6b0b85d122af5423
|
[] |
no_license
|
ritikalohia/django-rest-students
|
0cc56f435b7b2af881adfd7cace54eef98213c57
|
ca5f9f466fcd74fef8ce91f019bcb6e7d83c8e20
|
refs/heads/main
| 2023-08-15T21:51:18.988691
| 2021-10-14T18:19:04
| 2021-10-14T18:19:04
| 417,219,011
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,259
|
py
|
from django.shortcuts import render
# Create your views here.
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializers import NoteSerializer
from .models import Student
from api import serializers
@api_view(['GET'])
def getRoutes(request):
    """Describe every endpoint exposed by this API."""
    def route(endpoint, method, body, description):
        # Uniform shape for one endpoint description.
        return {
            'Endpoint': endpoint,
            'method': method,
            'body': body,
            'description': description,
        }

    routes = [
        route('/students/', 'GET', None,
              'Returns an array of notes'),
        route('/students/id', 'GET', None,
              'Returns a single note object'),
        route('/students/create/', 'POST', {'body': ""},
              'Creates a new note with data sent in post req'),
        route('/students/id/update/', 'PUT', {'body': ""},
              'Updates an existing note with data sent in post req'),
        route('/students/id/delete/', 'DELETE', None,
              'Deletes the existing node'),
    ]
    return Response(routes)
@api_view(['GET'])
def getNotes(request):
    """Return all Student records as a serialized list."""
    queryset = Student.objects.all()
    return Response(NoteSerializer(queryset, many=True).data)
@api_view(['GET'])
def getNote(request, pk):
    """Return the single Student identified by *pk*."""
    student = Student.objects.get(id=pk)
    return Response(NoteSerializer(student, many=False).data)
@api_view(['POST'])
def createNote(request):
    """Create a Student from the request payload and echo it back."""
    payload = request.data
    student = Student.objects.create(body=payload['body'])
    return Response(NoteSerializer(student, many=False).data)
@api_view(['PUT'])
def updateNote(request, pk):
    """Update the Student identified by *pk* with the request payload.

    Returns the updated representation on success, or the validation
    errors with HTTP 400 when the payload is invalid.
    """
    note = Student.objects.get(id=pk)
    serializer = NoteSerializer(note, data=request.data)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data)
    # Bug fix: the original fell through and returned serializer.data even
    # when validation failed, silently discarding the client's update.
    return Response(serializer.errors, status=400)
@api_view(['DELETE'])
def deleteNote(request, pk):
    """Delete the Student identified by *pk*.

    Bug fix: the original referenced the undefined name ``Note`` (only
    ``Student`` is imported in this module), so every call raised
    NameError instead of deleting the record.
    """
    note = Student.objects.get(id=pk)
    note.delete()
    return Response("Note was deleted")
|
[
"rtklohia@gmail.com"
] |
rtklohia@gmail.com
|
e03958b15c21ba4d88cb19941e55e1bc98cd51b9
|
3117c5e4a69b8486697c589ab3a033353be29f06
|
/sRNAtoolboxweb/setup.py
|
fccede6fdb81e7f682d6f6ecc24eac6bafc6499d
|
[] |
no_license
|
sert23/toolbox
|
963f301e1af883a55dc11db9ac6372023d85de91
|
a9af88a3164c5a6b5ace6a84a6b95d63265edf99
|
refs/heads/master
| 2023-09-01T01:46:05.808848
| 2021-11-19T16:41:25
| 2021-11-19T16:41:25
| 89,683,230
| 1
| 7
| null | 2022-12-26T19:46:46
| 2017-04-28T08:04:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
import os
from setuptools import find_packages, setup

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='srnatoolboxweb',
    version='2.0.0',
    packages=find_packages(),
    # Bug fix: setup() has no 'python_modules' keyword (it was silently
    # ignored); 'py_modules' is the supported name for top-level modules.
    py_modules=['manage'],
    include_package_data=True,
    description='sRNAtoolbox Web Application',
    author='Antonio Rueda, Ernesto Aparicio',
    author_email='aruemar@gmail.com',
    classifiers=[
        'Environment :: Other Environment',
        'Framework :: Django',
        'Framework :: Django :: 1.11.2',
        'Intended Audience :: Other Audience',
        'License :: Other/Proprietary License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Topic :: Scientific/Engineering',
    ],
    install_requires=[
        "Django==1.11.2",
        "pytz==2017.2",
        "wheel==0.24.0",
        "dajax==1.3",
        "xlrd==1.0.0",
        "pygal==2.3.1",
        "djangorestframework==3.6.3",
        # Bug fix: "django-tables2=1.7.1" (single '=') is not a valid
        # version specifier and made pip reject the requirement.
        "django-tables2==1.7.1",
    ]
)
|
[
"antonio.rueda-martin@genomicsengland.co.uk"
] |
antonio.rueda-martin@genomicsengland.co.uk
|
a5e14ad2c061f0da44911042f3f9e6acc294beed
|
97dfe708031ce9d52c3309b41a8c458d7846096c
|
/setup.py
|
0ca4d1e6521e23a70ad2dd4b4444b1d1c1098d50
|
[] |
no_license
|
trainapi/trainxtract
|
2af79a9dbb7a35a374934d1968591c32fbb23f0b
|
f5c5078a20c702d0399906bbb3c07f61058a1c72
|
refs/heads/master
| 2021-08-19T07:42:26.091190
| 2017-11-25T08:02:44
| 2017-11-25T08:02:44
| 111,971,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
from setuptools import setup, find_packages
import itertools  # NOTE(review): unused in this file; kept to avoid changing module behavior

# Packaging metadata for the trainxtract CLI.
options = {
    'name': 'trainxtract',
    'version': '0.0.1',
    'packages': find_packages(),
    'license': 'MIT',
    'install_requires': ['pandas', 'click'],
    'entry_points': {
        'console_scripts': [
            'trainxtract = trainxtract:run_app',
            'trainxtract-help = trainxtract:run_help',
            'trainxtract-final = trainxtract:run_final',
        ],
    },
}

setup(**options)
|
[
"jan.pipek@gmail.com"
] |
jan.pipek@gmail.com
|
e94bbb6a791401e2764951035b0805e4e59c5088
|
e38f7b5d46fd8a65c15e49488fc075e5c62943c9
|
/pychron/hardware/tasks/hardware_preferences.py
|
dc6f92027d74b684f471ec51b37f7c612eac702b
|
[] |
no_license
|
INGPAN/pychron
|
3e13f9d15667e62c347f5b40af366096ee41c051
|
8592f9fc722f037a61b0b783d587633e22f11f2f
|
refs/heads/master
| 2021-08-15T00:50:21.392117
| 2015-01-19T20:07:41
| 2015-01-19T20:07:41
| 111,054,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,953
|
py
|
#===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
#============= enthought library imports =======================
from traits.api import Bool, List, on_trait_change, String, Dict
from traitsui.api import View, Item, Group, VGroup, HGroup, EnumEditor
from pychron.envisage.tasks.base_preferences_helper import BasePreferencesHelper
from envisage.ui.tasks.preferences_pane import PreferencesPane
#============= standard library imports ========================
#============= local library imports ==========================
class HardwarePreferences(BasePreferencesHelper):
    """Preferences model for the Hardware category, persisted under the
    'pychron.hardware' preferences path."""
    name = 'Hardware'
    preferences_path = 'pychron.hardware'

    # Remote hardware server options.
    enable_hardware_server = Bool
    # Serial-port handle discovery options.
    auto_find_handle = Bool
    auto_write_handle = Bool
    # System lock: which named lock is selected and its resolved address.
    system_lock_name = String
    system_lock_address = String
    enable_system_lock = Bool

    system_lock_names = List
    system_lock_addresses = Dict

    # enable_directory_server = Bool
    # directory_server_host = Str
    # directory_server_port = Int
    # directory_server_root = Str

    @on_trait_change('system_lock_name,enable_system_lock')
    def _update(self, obj, name, new):
        # Keep system_lock_address in sync with the currently selected
        # lock name whenever the selection or the enable flag changes.
        try:
            addr = self.system_lock_addresses[self.system_lock_name]
        except (TypeError, KeyError):
            # Addresses not populated yet (None) or unknown name: leave the
            # displayed address unchanged.
            return
        self.system_lock_address = addr
class HardwarePreferencesPane(PreferencesPane):
    """Tasks-framework preferences pane that renders HardwarePreferences."""
    model_factory = HardwarePreferences
    category = 'Hardware'

    def traits_view(self):
        """Build the TraitsUI layout for the Hardware preferences page."""
        v = View(
            VGroup(
                # Remote hardware server section; the system-lock checkbox is
                # only active when the server itself is enabled.
                Group(
                    HGroup('enable_hardware_server', Item('enable_system_lock', enabled_when='enable_hardware_server')),
                    # Group(
                    #     Item('system_lock_name', editor=EnumEditor(values=self.system_lock_names),
                    #          enabled_when='enable_system_lock'),
                    #     Item('system_lock_address', style='readonly', label='Host'),
                    #     enabled_when='enable_hardware_server'),
                    label='Remote Hardware Server',
                    show_border=True
                ),
                # Group(
                #     Item('enable_directory_server'),
                #     Item('directory_server_root', enabled_when='enable_directory_server'),
                #     Item('directory_server_host', enabled_when='enable_directory_server'),
                #     Item('directory_server_port', enabled_when='enable_directory_server'),
                #     show_border=True,
                #     label='Directory Server'
                # ),
                # Serial-port section; auto_write only makes sense when
                # auto_find is on.
                Group(
                    'auto_find_handle',
                    Item('auto_write_handle', enabled_when='auto_find_handle'),
                    label='Serial',
                    show_border=True
                ),
            ),
            scrollable=True
        )
        return v
#============= EOF =============================================
|
[
"jirhiker@gmail.com"
] |
jirhiker@gmail.com
|
ee6e0078648f5af14f5e6850f8790d9047604a60
|
6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386
|
/google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/types/keyword_plan_ad_group_service.py
|
00babde924868763787da18ec7df0fa0a2c36c13
|
[
"Apache-2.0"
] |
permissive
|
oltoco/googleapis-gen
|
bf40cfad61b4217aca07068bd4922a86e3bbd2d5
|
00ca50bdde80906d6f62314ef4f7630b8cdb6e15
|
refs/heads/master
| 2023-07-17T22:11:47.848185
| 2021-08-29T20:39:47
| 2021-08-29T20:39:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,844
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.resources.types import keyword_plan_ad_group
from google.protobuf import field_mask_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
# Registers this module's message classes with the proto library; 'manifest'
# must list every message class defined below.
__protobuf__ = proto.module(
    package='google.ads.googleads.v8.services',
    marshal='google.ads.googleads.v8',
    manifest={
        'GetKeywordPlanAdGroupRequest',
        'MutateKeywordPlanAdGroupsRequest',
        'KeywordPlanAdGroupOperation',
        'MutateKeywordPlanAdGroupsResponse',
        'MutateKeywordPlanAdGroupResult',
    },
)
class GetKeywordPlanAdGroupRequest(proto.Message):
    r"""Request message for
    [KeywordPlanAdGroupService.GetKeywordPlanAdGroup][google.ads.googleads.v8.services.KeywordPlanAdGroupService.GetKeywordPlanAdGroup].

    Attributes:
        resource_name (str):
            Required. The resource name of the Keyword
            Plan ad group to fetch.
    """

    # Auto-generated proto field; the field number must stay in sync with
    # the .proto definition.
    resource_name = proto.Field(
        proto.STRING,
        number=1,
    )
class MutateKeywordPlanAdGroupsRequest(proto.Message):
    r"""Request message for
    [KeywordPlanAdGroupService.MutateKeywordPlanAdGroups][google.ads.googleads.v8.services.KeywordPlanAdGroupService.MutateKeywordPlanAdGroups].

    Attributes:
        customer_id (str):
            Required. The ID of the customer whose
            Keyword Plan ad groups are being modified.
        operations (Sequence[google.ads.googleads.v8.services.types.KeywordPlanAdGroupOperation]):
            Required. The list of operations to perform
            on individual Keyword Plan ad groups.
        partial_failure (bool):
            If true, successful operations will be
            carried out and invalid operations will return
            errors. If false, all operations will be carried
            out in one transaction if and only if they are
            all valid. Default is false.
        validate_only (bool):
            If true, the request is validated but not
            executed. Only errors are returned, not results.
    """

    # Auto-generated proto fields; field numbers must stay in sync with the
    # .proto definition.
    customer_id = proto.Field(
        proto.STRING,
        number=1,
    )
    operations = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message='KeywordPlanAdGroupOperation',
    )
    partial_failure = proto.Field(
        proto.BOOL,
        number=3,
    )
    validate_only = proto.Field(
        proto.BOOL,
        number=4,
    )
class KeywordPlanAdGroupOperation(proto.Message):
    r"""A single operation (create, update, remove) on a Keyword Plan
    ad group.

    Attributes:
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            The FieldMask that determines which resource
            fields are modified in an update.
        create (google.ads.googleads.v8.resources.types.KeywordPlanAdGroup):
            Create operation: No resource name is
            expected for the new Keyword Plan ad group.
        update (google.ads.googleads.v8.resources.types.KeywordPlanAdGroup):
            Update operation: The Keyword Plan ad group
            is expected to have a valid resource name.
        remove (str):
            Remove operation: A resource name for the removed Keyword
            Plan ad group is expected, in this format:
            ``customers/{customer_id}/keywordPlanAdGroups/{kp_ad_group_id}``
    """

    update_mask = proto.Field(
        proto.MESSAGE,
        number=4,
        message=field_mask_pb2.FieldMask,
    )
    # create/update/remove are mutually exclusive members of the
    # 'operation' oneof group; at most one may be set.
    create = proto.Field(
        proto.MESSAGE,
        number=1,
        oneof='operation',
        message=keyword_plan_ad_group.KeywordPlanAdGroup,
    )
    update = proto.Field(
        proto.MESSAGE,
        number=2,
        oneof='operation',
        message=keyword_plan_ad_group.KeywordPlanAdGroup,
    )
    remove = proto.Field(
        proto.STRING,
        number=3,
        oneof='operation',
    )
class MutateKeywordPlanAdGroupsResponse(proto.Message):
    r"""Response message for a Keyword Plan ad group mutate.

    Attributes:
        partial_failure_error (google.rpc.status_pb2.Status):
            Errors that pertain to operation failures in the partial
            failure mode. Returned only when partial_failure = true and
            all errors occur inside the operations. If any errors occur
            outside the operations (e.g. auth errors), we return an RPC
            level error.
        results (Sequence[google.ads.googleads.v8.services.types.MutateKeywordPlanAdGroupResult]):
            All results for the mutate. The order of the
            results is determined by the order of the
            keywords in the original request.
    """

    # Auto-generated proto fields; field numbers must stay in sync with the
    # .proto definition.
    partial_failure_error = proto.Field(
        proto.MESSAGE,
        number=3,
        message=status_pb2.Status,
    )
    results = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message='MutateKeywordPlanAdGroupResult',
    )
class MutateKeywordPlanAdGroupResult(proto.Message):
    r"""The result for the Keyword Plan ad group mutate.

    Attributes:
        resource_name (str):
            Returned for successful operations.
    """

    resource_name = proto.Field(
        proto.STRING,
        number=1,
    )
# Export exactly the message names registered in the proto manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
760d6d158cc7cb0dbb7d6a1b679e2e04aef1b9cf
|
13faa0d553ed6c6a57791db3dfdb2a0580a1695b
|
/codeforces/509-B/509-B-9647351.py
|
cc802213c7c9331abad058b7e8f889c9c36ca6d2
|
[] |
no_license
|
kautsiitd/Competitive_Programming
|
ba968a4764ba7b5f2531d03fb9c53dc1621c2d44
|
a0d8ae16646d73c346d9ce334e5b5b09bff67f67
|
refs/heads/master
| 2021-01-17T13:29:52.407558
| 2017-10-01T09:58:23
| 2017-10-01T09:58:23
| 59,496,650
| 0
| 0
| null | 2017-05-20T17:27:18
| 2016-05-23T15:56:55
|
HTML
|
UTF-8
|
Python
| false
| false
| 287
|
py
|
# Python 2 solution (raw_input / print statements).
# Reads n and k, then n target counts a[i]; feasible only when the counts
# span at most k (max - min <= k).
n,k=map(int,raw_input().split())
a=map(int,raw_input().split())
l=min(a)
if(max(a)-min(a)>k):
    print "NO"
else:
    print "YES"
    for i in range(n):
        # First l entries are all 1; the remaining a[i]-l entries count up
        # from 1, so at most (max-min)+1 <= k+1 distinct values appear.
        for j in range(l):
            print 1,
        for j in range(l,a[i]):
            print j-l+1,
        print ""
|
[
"kautsiitd@gmail.com"
] |
kautsiitd@gmail.com
|
857bfc2483daf1a2e52e74bddecde55c78c698f1
|
334d0a4652c44d0c313e11b6dcf8fb89829c6dbe
|
/checkov/dockerfile/checks/RootUser.py
|
8989d2c4ec635876e77148d43dddbbd0dbf81b70
|
[
"Apache-2.0"
] |
permissive
|
schosterbarak/checkov
|
4131e03b88ae91d82b2fa211f17e370a6f881157
|
ea6d697de4de2083c8f6a7aa9ceceffd6b621b58
|
refs/heads/master
| 2022-05-22T18:12:40.994315
| 2022-04-28T07:44:05
| 2022-04-28T07:59:17
| 233,451,426
| 0
| 0
|
Apache-2.0
| 2020-03-23T12:12:23
| 2020-01-12T20:07:15
|
Python
|
UTF-8
|
Python
| false
| false
| 723
|
py
|
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck
class RootUser(BaseDockerfileCheck):
    """CKV_DOCKER_8: the final USER instruction must not leave the container
    running as root."""

    def __init__(self):
        name = "Ensure the last USER is not root"
        id = "CKV_DOCKER_8"
        supported_instructions = ["USER"]
        categories = [CheckCategories.APPLICATION_SECURITY]
        super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)

    def scan_entity_conf(self, conf):
        """Fail when the last USER instruction resolves to root.

        :param conf: parsed USER instructions; only the last one determines
            the effective runtime user.
        """
        last_user = conf[-1]
        # Bug fix: the Dockerfile USER instruction accepts "<user>[:<group>]"
        # and numeric UIDs, so compare only the user part and also catch the
        # literal UID 0 ("USER root:root" / "USER 0" previously passed).
        user = last_user["value"].split(":")[0]
        if user in ("root", "0"):
            return CheckResult.FAILED, last_user
        return CheckResult.PASSED, last_user


check = RootUser()
|
[
"noreply@github.com"
] |
schosterbarak.noreply@github.com
|
3d71477099cbc4b93820335d2b9ffb4a7e41a779
|
9972675f285280948dd6becc466bc2f2d7efee8a
|
/swea/tree/practice/hip.py
|
3c7f86ee2afdab51a626dd8238d055f109a873ee
|
[] |
no_license
|
dowookims/ProblemSolving
|
308793055e8c1c247b7e00cb89d954d9a5eacf25
|
2183965b222afc7b5b316b9f53b04119384f8b24
|
refs/heads/master
| 2020-04-23T13:02:47.548120
| 2019-04-04T09:02:52
| 2019-04-04T09:02:52
| 171,189,025
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 823
|
py
|
'''
선형 자료구조에서 priority queue 는 선형 자료에서 O(n^2)를 차지해서 이를 대체하기 위해 나온데 힙
힙은 두가지 조건이 맞춰줘야 하는데
1. 구조적인 모습이 완전 이진트리여야 함(앞에서부터 완전히 채워져야 함)
2. 내부 논리 모습은 부모의 노드가 자식보다 항상 크거나 작아야 함(일관성)
최대 힙 : 키 값이 가장 큰 노드를 찾기 위한 완전이진트리
루트가 가장 큰 값을 가지고 있음.
최소 힙 : 최대 힙의 반대
삽입, 삭제가 존재하고, 구조를 유지시켜주게 만들어야 하는게 개발자의 숙명
힙은 프라이어티를 사용하기 위해 사용( Max, Min) 그리고 이제 루트에 있음.
그렇기에 삭제를 루트에서 진행함.
'''
|
[
"kaei2574@gmail.com"
] |
kaei2574@gmail.com
|
988d982f36e8a57ee5970b6516e61e75ee50e644
|
7f5a302eb7d93dc528f5c3a39d74f98995babbe4
|
/simplemoc/urls.py
|
eb8ee858fffb3a085d38a3d2fa2efe50e68cb49b
|
[] |
no_license
|
dennyerikson/simplemoc
|
d03a11eb63959890648c5801df3bb8a93cb1b490
|
e4199dab66e3cbe23d39765b30dcd79d5eae31bb
|
refs/heads/master
| 2020-03-23T22:54:00.859623
| 2018-07-31T19:33:35
| 2018-07-31T19:33:35
| 142,205,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 905
|
py
|
"""simplemoc URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
import simplemoc.core.views
# Root URLconf: everything under '' is delegated to the core app's URLconf
# (namespaced as 'core'); '/admin/' serves the Django admin site.
urlpatterns = [
    url(r'^', include('simplemoc.core.urls', namespace='core')),
    # url(r'^', simplemoc.core.urls),
    url(r'^admin/', admin.site.urls),
]
|
[
"dennyeriks00on@gmail.com"
] |
dennyeriks00on@gmail.com
|
200263997717b99b17f743c02a8f34453f4f5c84
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-waf/huaweicloudsdkwaf/v1/model/create_value_list_request_body.py
|
4bc66144328d7383b9ed3acc25cc45db5dd9dbba
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,143
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateValueListRequestBody:
    """
    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Attribute names whose values must be masked ("****") in to_dict().
    sensitive_list = []

    openapi_types = {
        'name': 'str',
        'type': 'str',
        'values': 'list[str]',
        'description': 'str'
    }

    attribute_map = {
        'name': 'name',
        'type': 'type',
        'values': 'values',
        'description': 'description'
    }

    def __init__(self, name=None, type=None, values=None, description=None):
        """CreateValueListRequestBody - a model defined in huaweicloud sdk"""

        self._name = None
        self._type = None
        self._values = None
        self._description = None
        self.discriminator = None

        # name and type are always assigned (even when None); values and
        # description are only set when explicitly provided.
        self.name = name
        self.type = type
        if values is not None:
            self.values = values
        if description is not None:
            self.description = description

    @property
    def name(self):
        """Gets the name of this CreateValueListRequestBody.

        Reference table name; a string of 2-32 characters.

        :return: The name of this CreateValueListRequestBody.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this CreateValueListRequestBody.

        Reference table name; a string of 2-32 characters.

        :param name: The name of this CreateValueListRequestBody.
        :type: str
        """
        self._name = name

    @property
    def type(self):
        """Gets the type of this CreateValueListRequestBody.

        Reference table type; see the enumeration list.

        :return: The type of this CreateValueListRequestBody.
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this CreateValueListRequestBody.

        Reference table type; see the enumeration list.

        :param type: The type of this CreateValueListRequestBody.
        :type: str
        """
        self._type = type

    @property
    def values(self):
        """Gets the values of this CreateValueListRequestBody.

        Values of the reference table.

        :return: The values of this CreateValueListRequestBody.
        :rtype: list[str]
        """
        return self._values

    @values.setter
    def values(self, values):
        """Sets the values of this CreateValueListRequestBody.

        Values of the reference table.

        :param values: The values of this CreateValueListRequestBody.
        :type: list[str]
        """
        self._values = values

    @property
    def description(self):
        """Gets the description of this CreateValueListRequestBody.

        Reference table description; at most 128 characters.

        :return: The description of this CreateValueListRequestBody.
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of this CreateValueListRequestBody.

        Reference table description; at most 128 characters.

        :param description: The description of this CreateValueListRequestBody.
        :type: str
        """
        self._description = description

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk every declared attribute, recursively converting nested
        # models (anything with to_dict) and masking sensitive fields.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        # NOTE: requires the third-party 'simplejson' package; the reload()
        # call is a Python 2 default-encoding workaround.
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreateValueListRequestBody):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
7c15b9536eb43122f435abd77e7f22404733fd0b
|
9861218f60ab23d6ac3bc6b400c220abf4e64fb5
|
/atividade_d/atividade 'd' lucas neves/q6.py
|
bf7faf95dd6744f9c3889cd5c8eea96d37cd1c16
|
[] |
no_license
|
rogeriosilva-ifpi/adsi-algoritmos-2016.1
|
a0b0709eb783110a9b335c8364aa41ce4f90fb24
|
1714e2480b80e46be4d96049e878bf17b692320b
|
refs/heads/master
| 2021-06-06T18:25:00.836715
| 2016-09-07T02:02:30
| 2016-09-07T02:02:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
# Nao sao estas as questoes...
# -*- coding: utf-8 -8-
"""6. Leia uma velocidade em km/h, calcule e escreva esta velocidade em m/s. (Vm/s = Vkm/h / 3.6)"""
velokm = input("Insira um velocidade em k/h: ")
veloms = velokm / 3.6
print "%.1f km/h equivale a %.1f m/s" % (velokm, veloms)
|
[
"rogerio.silva@ifpi.edu.br"
] |
rogerio.silva@ifpi.edu.br
|
3171446e524b21ed7612930789ea3980882ec432
|
45b0a75342b3af99039f7848f9556bcc5701ed16
|
/setup.py
|
d03825defd08bac677de6254ed364bf9fd539856
|
[
"BSD-3-Clause"
] |
permissive
|
simodalla/pympa-affarigenerali-OLD
|
acb18e18e68716bde99ecc9dafa67724cce81970
|
ab3c885d34a8eebcca76ccd62c3f559baede8c6d
|
refs/heads/master
| 2020-06-05T18:50:54.156687
| 2014-11-15T07:50:10
| 2014-11-15T07:50:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,526
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import organigrammi
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = organigrammi.__version__
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='openpa-organigrammi',
version=version,
description="""Your project description goes here""",
long_description=readme + '\n\n' + history,
author='Simone Dalla',
author_email='simodalla@gmail.com',
url='https://github.com/simodalla/openpa-organigrammi',
packages=[
'organigrammi',
],
include_package_data=True,
install_requires=[
],
license="BSD",
zip_safe=False,
keywords='openpa-organigrammi',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
)
|
[
"simodalla@gmail.com"
] |
simodalla@gmail.com
|
c49023ff5224e1af90c7b2a2674dea123f28e695
|
30227ff573bcec32644fca1cca42ef4cdd612c3e
|
/leetcode/array_and_string/array/plus_one.py
|
44d2298d96b9ab1d4be89d99d9d0bc0f3e2f88eb
|
[] |
no_license
|
saurabh-pandey/AlgoAndDS
|
bc55864422c93e6c93b8432e483394f286ce8ef2
|
dad11dedea9ceb4904d6c2dea801ce0172abfc81
|
refs/heads/master
| 2023-07-01T09:12:57.951949
| 2023-06-15T12:16:36
| 2023-06-15T12:16:36
| 88,239,921
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,690
|
py
|
#URL: https://leetcode.com/explore/learn/card/array-and-string/201/introduction-to-array/1148/
#Description
"""
Given a non-empty array of decimal digits representing a non-negative integer, increment one to the
integer.
The digits are stored such that the most significant digit is at the head of the list, and each
element in the array contains a single digit.
You may assume the integer does not contain any leading zero, except the number 0 itself.
Example 1:
Input: digits = [1,2,3]
Output: [1,2,4]
Explanation: The array represents the integer 123.
Example 2:
Input: digits = [4,3,2,1]
Output: [4,3,2,2]
Explanation: The array represents the integer 4321.
Example 3:
Input: digits = [0]
Output: [1]
Constraints:
1 <= digits.length <= 100
0 <= digits[i] <= 9
"""
#TODO: Below method uses digit based addition. It would be worthwhile to try converting this list
# to number. Adding one to it and then returning the result as a list back. It would also be good to
# check which method would be faster
def plusOne(digits):
length = len(digits)
assert length > 0
plusOneDigits = []
carryForward = False
for i in range(length - 1, -1, -1):
d = digits[i]
newD = d
if i == length - 1:
newD += 1
if carryForward:
newD += 1
carryForward = False
if newD == 10:
carryForward = True
plusOneDigits.append(0)
else:
plusOneDigits.append(newD)
if carryForward:
plusOneDigits.append(1)
newLen = len(plusOneDigits)
for i in range(int(newLen/2)):
pairId = newLen - 1 - i
temp = plusOneDigits[i]
plusOneDigits[i] = plusOneDigits[pairId]
plusOneDigits[pairId] = temp
return plusOneDigits
|
[
"saurabhpandey85@gmail.com"
] |
saurabhpandey85@gmail.com
|
93be1cd42df6b6d7eb07e70bbcbcf046809e5ee2
|
8389edf9cef84ece5f94e92eee2e5efeab7fdd83
|
/AmazonWishlistScraper/pipelines.py
|
07d8b2d0af0c17bf494f389c4d078d97e66c9c98
|
[
"MIT"
] |
permissive
|
scotm/AmazonUKWishlistScraper
|
7eb6d6f7d6e408852742a55e2eadb09a626e1e99
|
2bb95e31a3204d6a92fed35a71063a3ca9c3dd35
|
refs/heads/master
| 2020-12-25T15:28:57.233678
| 2016-09-05T11:29:31
| 2016-09-05T11:29:31
| 17,141,436
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 675
|
py
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/topics/item-pipeline.html
from scrapy.contrib.exporter import CsvItemExporter
def remove_goop(text):
return " ".join(text.split())
class AmazonCSVExport(CsvItemExporter):
fields_to_export = ["Title", "URL", "Amazon_Price", "Cheapest", "Cheapest_Condition", "Cheapest_Cost_Ratio", "Prime_Price", "Prime_Condition", "Prime_Cost_Ratio"]
class AmazonwishlistscraperPipeline(object):
def process_item(self, item, spider):
item["Prime_Condition"] = remove_goop(item["Prime_Condition"])
print item.keys()
return item
|
[
"scott.scotm@gmail.com"
] |
scott.scotm@gmail.com
|
473b9f9a9271ec67bdeb7ba8f50ec9d6a3b791e8
|
3b7474148c07df7f4755106a3d0ada9b2de5efdc
|
/django/projects/src/backup/www/views.py
|
ea9233e355201b26cb5f6e7b1d346d901de37605
|
[] |
no_license
|
juancsosap/pythontraining
|
7f67466846138f32d55361d64de81e74a946b484
|
1441d6fc9544042bc404d5c7efffd119fce33aa7
|
refs/heads/master
| 2021-08-26T05:37:15.851025
| 2021-08-11T22:35:23
| 2021-08-11T22:35:23
| 129,974,006
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
return HttpResponse('<h1>This is the Root</h1>')
|
[
"juan.c.sosa.p@gmail.com"
] |
juan.c.sosa.p@gmail.com
|
036a2e26605dcb8adca9e12f913f0e989a923bfb
|
aaad70e69d37f92c160c07e4ca03de80becf2c51
|
/filesystem/usr/lib/python3.6/_pyio.py
|
b805c49e9f62dd53e7b3ff719af305f55238b214
|
[] |
no_license
|
OSWatcher/ubuntu-server
|
9b4dcad9ced1bff52ec9cdb4f96d4bdba0ad3bb9
|
17cb333124c8d48cf47bb9cec1b4e1305626b17a
|
refs/heads/master
| 2023-02-10T18:39:43.682708
| 2020-12-26T01:02:54
| 2020-12-26T01:02:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
{
"MIME": "text/plain",
"inode_type": "REG",
"magic_type": "Python script, ASCII text executable",
"mode": "-rw-r--r--",
"sha1": "f88fc8316a266e9690fc63943b95eb39ae884d95"
}
|
[
"mathieu.tarral@protonmail.com"
] |
mathieu.tarral@protonmail.com
|
a4c5738a473170741a1f6b08a464fd02088bf873
|
7dae5c2536a2e3e2f8efa0480aa71a08a311849d
|
/migrations/versions/4ad6d590a93f_pass_secure.py
|
2d72ec77439f25976b87a3777cdca21e95dfdab5
|
[
"MIT"
] |
permissive
|
Derrick-Nyongesa/QwertyBlog
|
9a2997d116db39c551193b54f0aecbdb896646da
|
aa534c6fd475d4cf58f559ef7159b5cefe0ceed3
|
refs/heads/main
| 2023-04-24T05:18:13.975385
| 2021-05-01T11:15:11
| 2021-05-01T11:15:11
| 363,079,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
"""pass_secure
Revision ID: 4ad6d590a93f
Revises: f59a0d1927f8
Create Date: 2021-04-29 14:06:19.867578
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4ad6d590a93f'
down_revision = 'f59a0d1927f8'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('pass_secure', sa.String(length=255), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'pass_secure')
# ### end Alembic commands ###
|
[
"nyongesaderrick@gmail.com"
] |
nyongesaderrick@gmail.com
|
7fe5a14eea56e4e00848afe7f1069cd9f0415125
|
330aebe4ce7110310cccf68cfe6a4488a78f315e
|
/samples/RiskManagement/DecisionManager/dm-with-decisionprofilereject-response.py
|
6f40ef12e14e17569321bceeb2dbca2ca4ffcceb
|
[
"MIT"
] |
permissive
|
shalltell/cybersource-rest-samples-python
|
c8afdae73af8aaa14606f989d7baea8abbf41638
|
d92375fb1878ee810f4028a8850e97398533dbd0
|
refs/heads/master
| 2022-11-01T03:23:33.492038
| 2022-01-28T11:29:44
| 2022-01-28T11:29:44
| 222,025,982
| 0
| 0
|
MIT
| 2019-11-16T00:43:46
| 2019-11-16T00:43:45
| null |
UTF-8
|
Python
| false
| false
| 3,994
|
py
|
from CyberSource import *
import os
import json
from importlib.machinery import SourceFileLoader
config_file = os.path.join(os.getcwd(), "data", "Configuration.py")
configuration = SourceFileLoader("module.name", config_file).load_module()
# To delete None values in Input Request Json body
def del_none(d):
for key, value in list(d.items()):
if value is None:
del d[key]
elif isinstance(value, dict):
del_none(value)
return d
def dm_with_decisionprofilereject_response():
clientReferenceInformationCode = "54323007"
clientReferenceInformation = Riskv1decisionsClientReferenceInformation(
code = clientReferenceInformationCode
)
paymentInformationCardNumber = "4444444444444448"
paymentInformationCardExpirationMonth = "12"
paymentInformationCardExpirationYear = "2020"
paymentInformationCard = Riskv1decisionsPaymentInformationCard(
number = paymentInformationCardNumber,
expiration_month = paymentInformationCardExpirationMonth,
expiration_year = paymentInformationCardExpirationYear
)
paymentInformation = Riskv1decisionsPaymentInformation(
card = paymentInformationCard.__dict__
)
orderInformationAmountDetailsCurrency = "USD"
orderInformationAmountDetailsTotalAmount = "144.14"
orderInformationAmountDetails = Riskv1decisionsOrderInformationAmountDetails(
currency = orderInformationAmountDetailsCurrency,
total_amount = orderInformationAmountDetailsTotalAmount
)
orderInformationBillToAddress1 = "96, powers street"
orderInformationBillToAdministrativeArea = "NH"
orderInformationBillToCountry = "US"
orderInformationBillToLocality = "Clearwater milford"
orderInformationBillToFirstName = "James"
orderInformationBillToLastName = "Smith"
orderInformationBillToPhoneNumber = "7606160717"
orderInformationBillToEmail = "test@visa.com"
orderInformationBillToPostalCode = "03055"
orderInformationBillTo = Riskv1decisionsOrderInformationBillTo(
address1 = orderInformationBillToAddress1,
administrative_area = orderInformationBillToAdministrativeArea,
country = orderInformationBillToCountry,
locality = orderInformationBillToLocality,
first_name = orderInformationBillToFirstName,
last_name = orderInformationBillToLastName,
phone_number = orderInformationBillToPhoneNumber,
email = orderInformationBillToEmail,
postal_code = orderInformationBillToPostalCode
)
orderInformation = Riskv1decisionsOrderInformation(
amount_details = orderInformationAmountDetails.__dict__,
bill_to = orderInformationBillTo.__dict__
)
riskInformationProfileName = "profile2"
riskInformationProfile = Ptsv2paymentsRiskInformationProfile(
name = riskInformationProfileName
)
riskInformation = Riskv1decisionsRiskInformation(
profile = riskInformationProfile.__dict__
)
requestObj = CreateBundledDecisionManagerCaseRequest(
client_reference_information = clientReferenceInformation.__dict__,
payment_information = paymentInformation.__dict__,
order_information = orderInformation.__dict__,
risk_information = riskInformation.__dict__
)
requestObj = del_none(requestObj.__dict__)
requestObj = json.dumps(requestObj)
try:
config_obj = configuration.Configuration()
client_config = config_obj.get_configuration()
api_instance = DecisionManagerApi(client_config)
return_data, status, body = api_instance.create_bundled_decision_manager_case(requestObj)
print("\nAPI RESPONSE CODE : ", status)
print("\nAPI RESPONSE BODY : ", body)
return return_data
except Exception as e:
print("\nException when calling DecisionManagerApi->create_bundled_decision_manager_case: %s\n" % e)
if __name__ == "__main__":
dm_with_decisionprofilereject_response()
|
[
"gnongsie@visa.com"
] |
gnongsie@visa.com
|
6107c9b99f080aadc2604b747d9111565a5bf906
|
b16ebbadfd630b92068645ec671df8182fadf4fc
|
/registration_redux/signals.py
|
1b40767539169cfd6ba31f89e35edb5c20c4ecc9
|
[] |
no_license
|
IOEWRC/stu_teach
|
352bb7326645eacf340131c632eab5a28549d393
|
953931febab33d69a71c83e9ca44d376aa1d7320
|
refs/heads/master
| 2022-12-17T15:08:49.148449
| 2018-10-02T15:26:41
| 2018-10-02T15:26:41
| 141,098,799
| 0
| 3
| null | 2022-12-08T01:02:04
| 2018-07-16T06:57:08
|
Python
|
UTF-8
|
Python
| false
| false
| 946
|
py
|
from django.conf import settings
from django.contrib.auth import get_backends
from django.contrib.auth import login
from django.dispatch import Signal
# An admin has approved a user's account
user_approved = Signal(providing_args=["user", "request"])
# A new user has registered.
user_registered = Signal(providing_args=["user", "request"])
# A user has activated his or her account.
user_activated = Signal(providing_args=["user", "request"])
def login_user(sender, user, request, **kwargs):
""" Automatically authenticate the user when activated """
backend = get_backends()[0] # Hack to bypass `authenticate()`.
user.backend = "%s.%s" % (backend.__module__, backend.__class__.__name__)
login(request, user)
request.session['REGISTRATION_AUTO_LOGIN'] = True
request.session.modified = True
if getattr(settings, 'REGISTRATION_AUTO_LOGIN', False):
user_activated.connect(login_user)
|
[
"pawanpaudel93@gmail.com"
] |
pawanpaudel93@gmail.com
|
4c51d5551470c2a1accda560407256b08e83f5f4
|
ad5b72656f0da99443003984c1e646cb6b3e67ea
|
/src/bindings/python/src/openvino/test_utils/__init__.py
|
39abacb8fe3698459c5cead72b53daa782322b5c
|
[
"Apache-2.0"
] |
permissive
|
novakale/openvino
|
9dfc89f2bc7ee0c9b4d899b4086d262f9205c4ae
|
544c1acd2be086c35e9f84a7b4359439515a0892
|
refs/heads/master
| 2022-12-31T08:04:48.124183
| 2022-12-16T09:05:34
| 2022-12-16T09:05:34
| 569,671,261
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from .test_utils_api import compare_functions
|
[
"noreply@github.com"
] |
novakale.noreply@github.com
|
92977a8e1a7fba7692d6e2a1e648ce2923636a61
|
d6fe71e3e995c03b8f5151ab1d53411b77b325ba
|
/walklist_api_service/models/inline_response2014.py
|
dd612deaed7f317b142c07f218d808e84fcda9a9
|
[] |
no_license
|
mwilkins91/petpoint-scraper
|
95468ae9951deaa8bd3bef7d88c0ff660146c1a3
|
dd0c60c68fc6a7d11358aa63d28fdf07fff3c7cd
|
refs/heads/master
| 2022-11-27T00:02:50.654404
| 2020-08-09T18:41:40
| 2020-08-09T18:41:40
| 286,180,666
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,125
|
py
|
# coding: utf-8
"""
The Enrichment List
The THS enrichment list # noqa: E501
OpenAPI spec version: 1.0.0
Contact: contactme@markwilkins.co
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
def getResponse():
from walklist_api_service.models.response import Response
return Response
class InlineResponse2014(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'payload': 'CustomContent',
'meta': 'ResponseMeta'
}
if hasattr(getResponse(), "swagger_types"):
swagger_types.update(getResponse().swagger_types)
attribute_map = {
'payload': 'payload',
'meta': 'meta'
}
if hasattr(getResponse(), "attribute_map"):
attribute_map.update(getResponse().attribute_map)
def __init__(self, payload=None, meta=None, *args, **kwargs): # noqa: E501
"""InlineResponse2014 - a model defined in Swagger""" # noqa: E501
self._payload = None
self._meta = None
self.discriminator = None
if payload is not None:
self.payload = payload
if meta is not None:
self.meta = meta
Response.__init__(self, *args, **kwargs)
@property
def payload(self):
"""Gets the payload of this InlineResponse2014. # noqa: E501
:return: The payload of this InlineResponse2014. # noqa: E501
:rtype: CustomContent
"""
return self._payload
@payload.setter
def payload(self, payload):
"""Sets the payload of this InlineResponse2014.
:param payload: The payload of this InlineResponse2014. # noqa: E501
:type: CustomContent
"""
self._payload = payload
@property
def meta(self):
"""Gets the meta of this InlineResponse2014. # noqa: E501
:return: The meta of this InlineResponse2014. # noqa: E501
:rtype: ResponseMeta
"""
return self._meta
@meta.setter
def meta(self, meta):
"""Sets the meta of this InlineResponse2014.
:param meta: The meta of this InlineResponse2014. # noqa: E501
:type: ResponseMeta
"""
self._meta = meta
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InlineResponse2014, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineResponse2014):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"contactme@markwilkins.co"
] |
contactme@markwilkins.co
|
82d4c92e0a76a51b782ab665521c5d0dbd3f5d41
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_200/552.py
|
2a8dec0e8ed629d049222f52c7953ccea67f7d3e
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 894
|
py
|
def test(list):
for i in range(0, len(list)-1):
if(valueList[i] > valueList[i+1]):
return False
return True
def list2text(list):
result = "";
for i in list:
if(result != "" or i!=0):
result += str(i)
if(result == ""):
result = "0";
return result
t = int(input())
for line in range(1, t + 1):
value = input();
valueList = list(value);
for i in range(0, len(valueList)):
valueList[i] = int(valueList[i])
valueInteger = int(value);
result = valueInteger
#print( valueList)
#phase 1 go from first index to back and stop when fails
while(not test(valueList)):
for i in range(0, len(valueList)-1):
if valueList[i] > valueList[i+1]:
valueList[i] -= 1
for j in range(i+1, len(valueList)):
valueList[j] = 9
break
#print(valueList);
print("Case #{}: {}".format(line, list2text(valueList)));
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.