Dataset columns (each record below lists these fields in this order, separated by `|`):

| column | type | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable (⌀) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | length 128 to 8.19k |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
73f8f75df4969f97b4a998348fe16dcc440d2d48
|
7d02813987b49c2a69d92b9b2fdf5148af37274f
|
/case/Meet/testAEContactsList.py
|
63942a80c243f1797dfec6ebb3cadd7c2807d850
|
[] |
no_license
|
xgh321324/api_test
|
29e01cbe5f0b7c2df25fb7e781cedf8031140c72
|
2575495baac3ab90adab7a7a85904c38a78dd4b7
|
refs/heads/master
| 2022-07-23T19:54:39.320828
| 2022-07-02T09:13:35
| 2022-07-02T09:13:35
| 129,185,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,111
|
py
|
#coding:utf-8
import requests
import unittest
import time
from common.login import LG
from common.logger import Log
from common.Excel import Excel_util
from common.Hash import get_digit,get_sign
class Contact(unittest.TestCase):
def setUp(self):
self.s = requests.session()
self.lgin = LG(self.s)  # instantiate the login helper class
self.uid_token = self.lgin.login()  # log in and take the second-step result (uid/token) directly
self.header = {'User-Agent': 'LanTingDoctor/1.3.1 (iPad; iOS 10.1.1; Scale/2.00)',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-Hans-CN;q=1',
'Content-Type': 'application/json',
'requestApp': '3',
'requestclient': '2',
'versionForApp': '2.0',
'Authorization': 'Basic YXBpTGFudGluZ0BtZWRsYW5kZXIuY29tOkFwaVRobWxkTWxkQDIwMTM=',
'Connection': 'keep-alive'}
self.log = Log()  # instantiate the logger class
self.excel = Excel_util(r'C:\Users\Administrator\Desktop\interface_testcase.xls')
def test_contact_list(self):
u'Contact list endpoint'
self.log.info('参会人列表接口测试开始')
url = 'http://api.meet.sunnycare.cc/v2/contact/records'
json_data = {
"token":self.uid_token,
"nonce": get_digit(),
"timestamp": str(int(time.time()))
}
# sign the input parameters
json_data['sign'] = get_sign(json_data)
r = self.s.post(url,headers = self.header,json=json_data)
self.log.info('参会人列表返回内容是:%s' % r.json())
conten = r.json()['data']['content']
contact_code = {}
j = 1
for i in conten:
contact_code['contact_code'+str(j)] = i['contact_code']
j += 1
# write the contact_code values to Excel for other interface tests to use
self.excel.write_value(15,6,contact_code)
self.log.info('参会人列表接口测试结束!')
def tearDown(self):
self.s.close()
if __name__=='__main__':
unittest.main()
|
[
"34511103+xgh321324@users.noreply.github.com"
] |
34511103+xgh321324@users.noreply.github.com
|
95667a1114228dc710f3b65a3610f04a9a086f0a
|
2b682a01d19960e2039e2e064a742289b30da62c
|
/SConsArguments/tar.py
|
bc76ba439c220b692ae99a89c81ab1e85f7ee5f7
|
[
"MIT"
] |
permissive
|
mcqueen256/scons-arguments
|
952a427977c42161802225464e99bfeb4e5e9fd5
|
f4b783fc79fe3fc16e8d0f58308099a67752d299
|
refs/heads/master
| 2021-01-01T16:11:53.403454
| 2017-02-15T19:46:28
| 2017-02-15T19:46:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,620
|
py
|
"""`SConsArguments.tar`
Defines arguments related to tar archiver
**Arguments**
Programs:
TAR
The tar archiver
Flags for programs:
TARFLAGS
General options passed to the tar archiver
"""
#
# Copyright (c) 2017 by Pawel Tomulik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
__docformat__ = "restructuredText"
from SConsArguments.Util import flags2list
from SConsArguments.Importer import export_arguments
_all_arguments = {
'TAR' : {
'help' : 'The tar archiver',
'metavar' : 'PROG'
},
'TARFLAGS' : {
'help' : 'General options passed to the tar archiver',
'metavar' : 'FLAGS',
'converter' : flags2list
},
}
_groups = {
'progs' : [ 'TAR' ],
'flags' : [ 'TARFLAGS' ]
}
def arguments(**kw):
"""Returns argument declarations for 'tar' tool
:Keywords:
include_groups : str | list
include only arguments assigned to these groups
exclude_groups : str | list
exclude arguments assigned to these groups
tar_include_groups : str | list
include only arguments assigned to these groups, this has
higher priority than **include_groups**
tar_exclude_groups : str | list
exclude arguments assigned to these groups, this has higher
priority than **exclude_groups**
"""
return export_arguments('tar', _all_arguments, _groups, **kw)
# Local Variables:
# # tab-width:4
# # indent-tabs-mode:nil
# # End:
# vim: set syntax=python expandtab tabstop=4 shiftwidth=4:
|
[
"ptomulik@meil.pw.edu.pl"
] |
ptomulik@meil.pw.edu.pl
|
486c5c4863977b8e538c9d79f11f23a270e47d2a
|
b478d1e63cce432b6fd3692c0aa7a84f411ae9dc
|
/net_prog/ch1/test3.py
|
c94c114170bd57be0c4c29c64f58045d0441b891
|
[] |
no_license
|
yiqing95/py_study
|
8d414aa00b4ac31070fe5667a98815980eee46d0
|
6ce6b46ad729a795bc9253d6339169e62ef47766
|
refs/heads/master
| 2016-09-06T17:45:26.081269
| 2015-01-12T15:22:29
| 2015-01-12T15:22:29
| 20,810,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
__author__ = 'yiqing'
import socket
def get_remote_machine_info():
remote_host = 'www.python.com'
try:
print("IP address : %s " % socket.gethostbyname(remote_host))
except socket.error as err:
print("%s " % err)
if __name__ == '__main__':
get_remote_machine_info()
|
[
"yiqing-95@qq.com"
] |
yiqing-95@qq.com
|
e0747aef526c8d78bb3770f3ba3343fd6496a2ce
|
d52413173437ba73ecdf822ca895e659f00a8ce7
|
/kiwibackend/application/website/mobile/views/gmapi/instance.py
|
af1bb96d97b926f195022a516f2f0c28cf6178d2
|
[] |
no_license
|
whiteprism/mywork
|
2329b3459c967c079d6185c5acabd6df80cab8ea
|
a8e568e89744ca7acbc59e4744aff2a0756d7252
|
refs/heads/master
| 2021-01-21T11:15:49.090408
| 2017-03-31T03:28:13
| 2017-03-31T03:28:13
| 83,540,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,423
|
py
|
# -*- coding: utf-8 -*-
from decorators import *
from api import *
from opensocial.http import HttpResponseJson
from module.common.static import ErrorCode
from django.conf import settings
@handle_verification
def query_player_raceinstance(request):
'''
Query a player's event raid instances
'''
playerId = request.REQUEST.get("playerId", "").strip()
serverId = request.REQUEST.get("serverId", str(settings.SERVERID)).strip()
player = get_player_by_id_or_str(playerId, int(serverId))
resdata = {}
if not player:
resdata["success"] = False
resdata["message"] = u"该玩家不存在!"
resdata["data"] = []
return HttpResponseJson(resdata)
instances = player.raidinstances.all().values()
data = []
for instance in instances:
meta = instance.to_dict()
data.append(meta)
resdata["success"] = True
resdata["message"] = ""
resdata["data"] = data
return HttpResponseJson(resdata)
# if not player:
# data = {"success": False,
# "message": "The user does not exist!",
# "errorcode": ErrorCode.ERROR_PLAYER_IS_NONE
# }
# return HttpResponseJson(data)
# instances = player.raidinstances.all().values()
# data = []
# for instance in instances:
# meta = instance.to_dict()
# data.append(meta)
# return HttpResponseJson(data)
@handle_verification
def query_player_elementtower(request):
'''
Query a player's Element Tower
'''
playerId = request.REQUEST.get("playerId", "").strip()
serverId = request.REQUEST.get("serverId", str(settings.SERVERID)).strip()
player = get_player_by_id_or_str(playerId, int(serverId))
resdata = {}
if not player:
resdata["success"] = False
resdata["message"] = u"该玩家不存在!"
resdata["data"] = {}
return HttpResponseJson(resdata)
data = player.elementTower.to_dict()
resdata["success"] = True
resdata["message"] = ""
resdata["data"] = data
return HttpResponseJson(resdata)
# if not player:
# data = {"success": False,
# "message": "The user does not exist!",
# "errorcode": ErrorCode.ERROR_PLAYER_IS_NONE
# }
# return HttpResponseJson(data)
# return HttpResponseJson(player.elementTower.to_dict())
|
[
"snoster@163.com"
] |
snoster@163.com
|
ac3a26558056f3fc9a3935f967d3e2c606441a03
|
3d61fe0f49f5d344fc32a6faa799f0a46deec9a5
|
/2020/AoC-2020-8.py
|
706a0af5c3c9f154ced7975039174fb55ed93ec7
|
[] |
no_license
|
sbeaumont/AoC
|
558296fd26cd5272e33d3cb9113c09e4945c98ac
|
406eda614d8434d8feb71fe1262f1fda54972a12
|
refs/heads/master
| 2022-12-13T07:38:36.089775
| 2022-12-04T21:11:49
| 2022-12-04T21:11:49
| 75,467,985
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,433
|
py
|
#!/usr/bin/env python3
"""Solution for Advent of Code challenge 2020 - Day 8"""
__author__ = "Serge Beaumont"
__date__ = "December 2020"
from copy import deepcopy
with open(f"AoC-2020-8-input.txt") as infile:
program_file = [line.strip().split() for line in infile.readlines()]
for line in program_file:
line[1] = int(line[1])
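# Execute the program until an instruction would run a second time (infinite loop)
# or the pointer moves past the last instruction; return ('normal' | 'looped', accumulator).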
def run_program(program):
visited = list()
pointer = 0
accumulator = 0
while pointer not in visited:
visited.append(pointer)
instruction = program[pointer]
if instruction[0] == 'acc':
accumulator += instruction[1]
pointer += 1
elif instruction[0] == 'nop':
pointer += 1
elif instruction[0] == 'jmp':
pointer += instruction[1]
else:
assert False, f"Instruction {instruction} not known"
if pointer >= len(program):
return "normal", accumulator
return 'looped', accumulator
if __name__ == '__main__':
print("Part 1:", run_program(program_file)[1])
for i in range(len(program_file)):
operator = program_file[i][0]
if operator in ['jmp', 'nop']:
proggy = deepcopy(program_file)
proggy[i][0] = 'nop' if operator == 'jmp' else 'jmp'
result, accumulator = run_program(proggy)
if result == 'normal':
print("Part 2:", accumulator)
break
|
[
"sbeaumont@xebia.com"
] |
sbeaumont@xebia.com
|
515efa1f6a90f9f2e0e52cc7abd3fddc39d81afb
|
8c1aa957a41954daac70b13f1be06df0c4046bb2
|
/wagtailwebsitebuilder/home/migrations/0017_auto_20200422_0845.py
|
6ca88cea553b2a0fffa682871e8b1086cfdd800a
|
[] |
no_license
|
hanztura/wagtailwebsitebuilder
|
6c1a2358d53877e4f70d70e5c7c6b472fabec974
|
f56d1b799f9eda53b5596ed882b60df154581cc5
|
refs/heads/master
| 2021-05-21T08:30:16.170885
| 2020-08-29T22:35:59
| 2020-08-29T22:35:59
| 252,619,323
| 1
| 0
| null | 2021-04-16T20:26:46
| 2020-04-03T03:01:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,152
|
py
|
# Generated by Django 2.2.12 on 2020-04-22 08:45
from django.db import migrations
import puputextension.helpers
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('home', '0016_auto_20200412_1525'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='body',
field=wagtail.core.fields.StreamField([('with_id', wagtail.core.blocks.StructBlock([('id', wagtail.core.blocks.CharBlock()), ('paragraph', wagtail.core.blocks.RichTextBlock())], template='home/blocks/with_id.html')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('code', wagtail.core.blocks.StructBlock([('language', wagtail.core.blocks.ChoiceBlock(blank=False, choices=[('python3', 'Python 3'), ('bash', 'Bash/Shell'), ('javascript', 'Javascript'), ('css', 'CSS'), ('html', 'HTML')], null=False)), ('caption', wagtail.core.blocks.CharBlock(blank=True, nullable=True, required=False)), ('code', puputextension.helpers.CodeTextBlock())])), ('image', wagtail.images.blocks.ImageChooserBlock())]),
),
]
|
[
"hanztura@github.com"
] |
hanztura@github.com
|
25c71476cf84f80af692fc6fbeb2506beb383991
|
8697dbe95cfbdc4c0df211c8f809bcaaf473a36f
|
/pendulum/__version__.pyi
|
bc75e0e25899f0df91b9d34c814d5a441699f106
|
[
"MIT"
] |
permissive
|
Michael0x2a/pendulum-stubs
|
9ef1b03c76ea7aa6eff56c890dab65801672a299
|
5a1189f2d39e1a1974cf0acf686db4b7f01bf8db
|
refs/heads/master
| 2020-04-19T02:29:28.303289
| 2019-02-10T17:23:30
| 2019-02-10T17:23:30
| 167,904,665
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
pyi
|
# Stubs for pendulum.__version__ (Python 3.7)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
__version__: str = ...
|
[
"michael.lee.0x2a@gmail.com"
] |
michael.lee.0x2a@gmail.com
|
035e3ff6b199e3d228fa8e688242053edc7dff29
|
1a758ef862f733d98ddd8ebc8ade5cefd95c24f2
|
/customers/migrations/0027_auto_20170508_2340.py
|
9962ba06eee066980375d3484ef93bfaf2f713d6
|
[] |
no_license
|
ajajul/ReactJS_Python
|
f116b35394666c5b3f2419eb5d8d7aeb077d4a24
|
08310d56fa88f326ddbfdd4b189f2a3a71f76d99
|
refs/heads/master
| 2020-03-19T03:16:57.510672
| 2018-06-01T10:36:36
| 2018-06-01T10:36:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 581
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('customers', '0026_auto_20170428_1403'),
]
operations = [
migrations.AlterField(
model_name='coffeereview',
name='order',
field=models.ForeignKey(related_name='reviews', to='customers.Order'),
),
migrations.AlterUniqueTogether(
name='coffeereview',
unique_together=set([('order', 'coffee')]),
),
]
|
[
"web.expert@aol.com"
] |
web.expert@aol.com
|
b676d38523f8e46480dd315d996f0d67698a85e9
|
6219e6536774e8eeb4cadc4a84f6f2bea376c1b0
|
/scraper/storage_spiders/xbookcomvn.py
|
3195cfea973ea8288043a4a74beb690127d93d2e
|
[
"MIT"
] |
permissive
|
nguyenminhthai/choinho
|
109d354b410b92784a9737f020894d073bea1534
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
refs/heads/master
| 2023-05-07T16:51:46.667755
| 2019-10-22T07:53:41
| 2019-10-22T07:53:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div/font[@class='CTieuDeNhoNho']",
'price' : "//table//tr/td[2]/font/div/font/b",
'category' : "//div[@id='content']/table[1]/tbody/tr/td[1]/table/tbody/tr[1]/td[@class='CTieuDeNho']",
'description' : "//div[@id='content']/table[1]/tbody/tr/td[1]/table/tbody/tr[2]/td/table/tbody/tr[2]/td/p",
'images' : "//div[@id='content']/table//tr/td/table//tr[2]/td/table/tbody/tr[1]/td/table//tr/td[1]/table//tr[1]/td/a/img/@src",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'xbook.com.vn'
allowed_domains = ['xbook.com.vn']
start_urls = ['http://www.xbook.com.vn']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['CatId=', 'NewsId=']), 'parse_item'),
Rule(LinkExtractor(allow=['CategoryLoai=+\d+$']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
|
[
"nguyenchungthuy.hust@gmail.com"
] |
nguyenchungthuy.hust@gmail.com
|
80c17d8f71b2c154e6a79fc0e019d8b3749683f8
|
eaf54e6c022b748fd0d04f076634aafdfad5f69f
|
/motors/tools/enable_test.py
|
a873643226d5ae664ca70200712378dfb4e4abd4
|
[] |
no_license
|
PenguPilot/PenguPilot
|
91f131effa11a3c1ef47abb161463772325ae63b
|
9cb08836789cf17b9de57517040188c79765046b
|
refs/heads/ng-wip
| 2020-03-28T00:41:06.634109
| 2015-11-19T16:06:00
| 2015-11-19T16:06:00
| 7,300,320
| 78
| 30
| null | 2015-11-19T16:06:00
| 2012-12-24T00:18:48
|
C
|
UTF-8
|
Python
| false
| false
| 1,364
|
py
|
#!/usr/bin/env python
"""
___________________________________________________
| _____ _____ _ _ _ |
| | __ \ | __ (_) | | | |
| | |__) |__ _ __ __ _ _ _| |__) || | ___ | |_ |
| | ___/ _ \ '_ \ / _` | | | | ___/ | |/ _ \| __| |
| | | | __/ | | | (_| | |_| | | | | | (_) | |_ |
| |_| \___|_| |_|\__, |\__,_|_| |_|_|\___/ \__| |
| __/ | |
| GNU/Linux based |___/ Multi-Rotor UAV Autopilot |
|___________________________________________________|
OMAP3-PWM Motor Test Program
Copyright (C) 2014 Tobias Simon
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details. """
from time import sleep
from sys import argv
from scl import scl_get_socket
socket = scl_get_socket('mot_en', 'push')
sleep(0.5)
m1 = int(argv[1])
m2 = int(argv[2])
m3 = int(argv[3])
m4 = int(argv[4])
socket.send([m1, m2, m3, m4])
|
[
"tobias.simon@tu-ilmenau.de"
] |
tobias.simon@tu-ilmenau.de
|
0690830ce3ac14f34a1c85b94348aab7a7d9b37f
|
1db2e2238b4ef9c1b6ca3b99508693ee254d6904
|
/develop/md_sim_holo_analysis/plot_cc_data.py
|
d5b6fd2975790d699c8e3b82243558e3a33f4170
|
[] |
no_license
|
pgreisen/pythonscripts
|
8674e08095f76edf08ef2059300349218079724c
|
0aadf8f96d19b306c1bc44a772e766a06fe3408b
|
refs/heads/master
| 2021-07-06T23:54:57.774342
| 2021-06-08T19:36:36
| 2021-06-08T19:36:36
| 22,017,192
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
from numpy import *
import sys
def get_matrix(datafile):
# minimal completion (assumed intent): load the whitespace-separated file as a numpy array (loadtxt comes from the numpy star import)
return loadtxt(datafile)
def main():
inputfile = sys.argv[1]
with open(inputfile, 'r') as f:
for line in f:
print(len(line.split()))  # number of columns on this line
if __name__ == "__main__":
main()
|
[
"pgreisen@gmail.com"
] |
pgreisen@gmail.com
|
c5b358ad0899de087b7ee223a7bcd45f3ad3f642
|
a333ef95f7deeb7a0a6ee4700beb022dc7649256
|
/ecommerce/forms.py
|
e21e2d600b125de9ddfc4520c15ab34d25323cb8
|
[] |
no_license
|
gmachielsen/fullstackproject
|
3884dc2b301c3aeab1eb6aa025159754e5a3b9ea
|
7a4879d9fb83ec5c83ff39ea12f7986deae4cfcc
|
refs/heads/master
| 2020-07-27T04:25:04.494803
| 2019-10-07T13:35:43
| 2019-10-07T13:35:43
| 208,867,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,477
|
py
|
from django import forms
from django.contrib.auth import get_user_model
User = get_user_model()
class ContactForm(forms.Form):
fullname = forms.CharField(
widget=forms.TextInput(
attrs={
"class": "form-control",
"id": "form_full_name",
"placeholder":"Uw volledige naam"
}
)
)
email = forms.EmailField(
widget=forms.EmailInput(
attrs={
"class": "form-control",
"placeholder":"Uw e-mailadres"
}
)
)
content = forms.CharField(
widget=forms.Textarea(
attrs={
"class": "form-control",
"placeholder":"Uw bericht"
}
)
)
def clean_email(self):
email = self.cleaned_data.get("email")
if not "gmail.com" in email:
raise forms.ValidationError("Email has to be gmail.com")
return email
|
[
"g.machielsen@gmail.com"
] |
g.machielsen@gmail.com
|
03e28e10b2f413a51c84df41bcf5de6ac327d799
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/messenger/proto/bw_chat2/__init__.py
|
ac4f1deb4dfe87e1d77ff2abdd87fb006403ac1c
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 4,907
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/messenger/proto/bw_chat2/__init__.py
from messenger import g_settings
from messenger.m_constants import MESSENGER_SCOPE, PROTO_TYPE
from messenger.proto.bw_chat2 import chat_handlers
from messenger.proto.bw_chat2.VOIPChatProvider import VOIPChatProvider
from messenger.proto.bw_chat2.VOIPChatController import VOIPChatController
from messenger.proto.events import g_messengerEvents
from messenger.proto.interfaces import IProtoPlugin
from messenger.proto.bw_chat2.provider import BWChatProvider
from messenger.proto.bw_chat2.UsersHandler import UsersHandler
class BWProtoPlugin(IProtoPlugin):
__slots__ = ('__provider', '__adminChat', '__users', '__arenaChat', '__battleCmd', '__unitChat', '__voipProvider', '__voipCtrl', '__isConnected')
def __init__(self):
super(BWProtoPlugin, self).__init__()
self.__provider = None
self.__adminChat = None
self.__users = None
self.__arenaChat = None
self.__battleCmd = None
self.__unitChat = None
self.__voipProvider = None
self.__voipCtrl = None
self.__isConnected = False
return
@property
def arenaChat(self):
return self.__arenaChat
@property
def battleCmd(self):
return self.__battleCmd
@property
def unitChat(self):
return self.__unitChat
@property
def adminChat(self):
return self.__adminChat
@property
def provider(self):
return self.__provider
@property
def voipProvider(self):
return self.__voipProvider
@property
def voipController(self):
return self.__voipCtrl
@property
def users(self):
return self.__users
def isConnected(self):
return self.__isConnected
def connect(self, scope):
if scope != MESSENGER_SCOPE.BATTLE:
self.__arenaChat.leave()
if not self.__isConnected:
self.__isConnected = True
self.__voipCtrl.start()
g_messengerEvents.onPluginConnected(PROTO_TYPE.BW_CHAT2)
def view(self, scope):
self.__provider.setEnable(True)
self.__battleCmd.switch(scope)
def disconnect(self):
if not self.__isConnected:
return
self.__isConnected = False
self.__arenaChat.disconnect()
self.__unitChat.disconnect()
self.__voipProvider.leave()
self.__voipCtrl.stop()
self.__provider.setEnable(False)
g_messengerEvents.onPluginDisconnected(PROTO_TYPE.BW_CHAT2)
def goToReplay(self):
self.__provider.goToReplay()
self.__battleCmd.goToReplay()
def setFilters(self, msgFilterChain):
self.__provider.setFilters(msgFilterChain)
def init(self):
self.__provider = BWChatProvider()
self.__adminChat = chat_handlers.AdminChatCommandHandler(self.__provider)
self.__adminChat.registerHandlers()
self.__users = UsersHandler(self.__provider)
self.__users.registerHandlers()
self.__arenaChat = chat_handlers.ArenaChatHandler(self.__provider, self.__adminChat)
self.__arenaChat.registerHandlers()
self.__battleCmd = chat_handlers.BattleChatCommandHandler(self.__provider)
self.__battleCmd.registerHandlers()
self.__unitChat = chat_handlers.UnitChatHandler(self.__provider, self.__adminChat)
self.__unitChat.registerHandlers()
self.__voipProvider = VOIPChatProvider(self.__provider)
self.__voipProvider.registerHandlers()
self.__voipCtrl = VOIPChatController()
def clear(self):
if self.__arenaChat:
self.__arenaChat.unregisterHandlers()
self.__arenaChat.clear()
self.__arenaChat = None
if self.__battleCmd:
self.__battleCmd.unregisterHandlers()
self.__battleCmd.clear()
self.__battleCmd = None
if self.__unitChat:
self.__unitChat.unregisterHandlers()
self.__unitChat.clear()
self.__unitChat = None
if self.__adminChat:
self.__adminChat.unregisterHandlers()
self.__adminChat.clear()
self.__adminChat = None
if self.__voipProvider:
self.__voipProvider.unregisterHandlers()
self.__voipProvider.clear()
self.__voipProvider = None
if self.__voipCtrl:
self.__voipCtrl.stop()
self.__voipCtrl = None
if self.__provider:
self.__provider.clear()
self.__provider = None
if self.__users:
self.__users.clear()
self.__users = None
return
def onActionReceived(self, actionID, reqID, args):
if g_settings.server.BW_CHAT2.isEnabled():
self.__provider.onActionReceived(actionID, reqID, args)
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
620b7c3b1f1b3ae6cb598abf93fc176deb65cae5
|
5c0c0176db0ccf2c24b6b5ed459a8dc144518b13
|
/nni/nas/tensorflow/base_mutator.py
|
860680f199278d3fd38910b82e7661b17d2f652e
|
[
"MIT"
] |
permissive
|
petuum/nni
|
ac4f4a1c4d6df71684eeffa127b7c4858fd29e97
|
8134be6269902939232482d63649c06f9864be6d
|
refs/heads/master
| 2023-02-18T11:21:41.078889
| 2021-01-20T03:21:50
| 2021-01-20T03:21:50
| 302,736,456
| 4
| 3
|
MIT
| 2020-11-20T20:21:15
| 2020-10-09T19:34:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,957
|
py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from tensorflow.keras import Model
from .mutables import Mutable, MutableScope, InputChoice
from .utils import StructuredMutableTreeNode
class BaseMutator(Model):
def __init__(self, model):
super().__init__()
self.__dict__['model'] = model
self._structured_mutables = self._parse_search_space(self.model)
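# Recursively walk the wrapped model's layers, bind every Mutable to this mutator,
# and collect them into a StructuredMutableTreeNode tree; a Mutable nested inside
# another Mutable (outside a MutableScope) raises RuntimeError.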
def _parse_search_space(self, module, root=None, prefix='', memo=None, nested_detection=None):
if memo is None:
memo = set()
if root is None:
root = StructuredMutableTreeNode(None)
if module not in memo:
memo.add(module)
if isinstance(module, Mutable):
if nested_detection is not None:
raise RuntimeError('Cannot have nested search space. Error at {} in {}'
.format(module, nested_detection))
module.name = prefix
module.set_mutator(self)
root = root.add_child(module)
if not isinstance(module, MutableScope):
nested_detection = module
if isinstance(module, InputChoice):
for k in module.choose_from:
if k != InputChoice.NO_KEY and k not in [m.key for m in memo if isinstance(m, Mutable)]:
raise RuntimeError('"{}" required by "{}" not found in keys that appeared before, and is not NO_KEY.'
.format(k, module.key))
for submodule in module.layers:
if not isinstance(submodule, Model):
continue
submodule_prefix = prefix + ('.' if prefix else '') + submodule.name
self._parse_search_space(submodule, root, submodule_prefix, memo=memo, nested_detection=nested_detection)
return root
@property
def mutables(self):
return self._structured_mutables
def undedup_mutables(self):
return self._structured_mutables.traverse(deduplicate=False)
def call(self, *inputs):
raise RuntimeError('Call is undefined for mutators.')
def __setattr__(self, name, value):
if name == 'model':
raise AttributeError("Attribute `model` can be set at most once, and you shouldn't use `self.model = model` to "
"include your network, as it will include all parameters in model into the mutator.")
return super().__setattr__(name, value)
def enter_mutable_scope(self, mutable_scope):
pass
def exit_mutable_scope(self, mutable_scope):
pass
def on_forward_layer_choice(self, mutable, *inputs):
raise NotImplementedError
def on_forward_input_choice(self, mutable, tensor_list):
raise NotImplementedError
def export(self):
raise NotImplementedError
|
[
"noreply@github.com"
] |
petuum.noreply@github.com
|
108d1dfb0862d12efa5e05cbbe676147dcf2ad65
|
32daa457e295c74b96c99f74a6a3031cf03c571e
|
/aliyun-python-sdk-onsmqtt/aliyunsdkonsmqtt/request/v20191211/ApplyTokenRequest.py
|
c2f46482b8d34454284f4fd68b823522489df47f
|
[
"Apache-2.0"
] |
permissive
|
BertonLan/aliyun-openapi-python-sdk
|
0836057c888f7534f37b0001fe2a338c6d505e8e
|
fd9723c2a800b991179231c1ac4bc92dd8bb5934
|
refs/heads/master
| 2022-04-23T16:57:26.354904
| 2020-04-22T02:51:45
| 2020-04-22T02:51:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,664
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ApplyTokenRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'OnsMqtt', '2019-12-11', 'ApplyToken','onsmqtt')
self.set_method('POST')
def get_ExpireTime(self):
return self.get_query_params().get('ExpireTime')
def set_ExpireTime(self,ExpireTime):
self.add_query_param('ExpireTime',ExpireTime)
def get_Resources(self):
return self.get_query_params().get('Resources')
def set_Resources(self,Resources):
self.add_query_param('Resources',Resources)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_Actions(self):
return self.get_query_params().get('Actions')
def set_Actions(self,Actions):
self.add_query_param('Actions',Actions)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
69f21e60bf08fd723da51b261ecc602779004ec5
|
3eb20ec4cf54cd01fc71caa3e8561ef1ff80b893
|
/revyoume_club/admin.py
|
00b452e6560ea90fc25b570e531a788c63bcb318
|
[] |
no_license
|
hottredpen/laowai_panda
|
38f0db4f6d848ed0b7b6449d1bc77aa952dac695
|
e862bbe64b84698b7981176c7c190775edf99a67
|
refs/heads/master
| 2023-01-03T05:11:01.012487
| 2020-10-12T08:04:19
| 2020-10-12T08:04:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,420
|
py
|
from django.contrib import admin
from .models import *
from solo.admin import SingletonModelAdmin
from django import forms
from django.contrib import messages
# Register your models here.
class PostInline(admin.TabularInline):
model = Post
extra = 0
readonly_fields = ('text', 'type',
'media', 'channel', 'liked_by_users',)
def has_add_permission(self, request):
return False
class PostAdmin(admin.ModelAdmin):
list_filter = ('type', 'show_in', 'channel')
search_fields = ('text',)
list_display = ('text', 'type', 'show_in', 'channel', 'likes')
readonly_fields = ('liked_by_users',)
def save_model(self, request, obj, form, change):
error = ""
if obj.type == Post.TXT_IMAGE and obj.media == "":
error = "Please upload an image to the media."
elif obj.type == Post.TXT_VIDEO and obj.media == "" and not obj.youko_link:
error = "Please upload an video to the media or add youko url."
else:
super().save_model(request, obj, form, change)
return obj
self.message_user(request, error, messages.ERROR)
return obj
class ChannelAdmin(admin.ModelAdmin):
inlines = [PostInline, ]
list_display = ('name',)
admin.site.register(RevyoumeClubSetting, SingletonModelAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(Channel, ChannelAdmin)
|
[
"893287656@qq.com"
] |
893287656@qq.com
|
cc4076f02f63594508a9d96b275032e977bddf42
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03003/s019657668.py
|
2a8dd04d77c9270c431b31d84ff7f1a1871ddf1e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
#15:35
h,w = map(int,input().split())
a = list(map(int,input().split()))
b = list(map(int,input().split()))
mod = 10 ** 9 + 7
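# DP with inclusion-exclusion: now[j+1] = number of common subsequence pairs of
# a[:i+1] and b[:j+1] (the empty subsequence counted), modulo 1e9+7.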
now = [1 for _ in range(w+1)]
for i in range(h):
last = now
now = [1]
for j in range(w):
if a[i] == b[j]:
now.append((last[j+1]+now[-1])%mod)
else:
now.append((last[j+1]+now[-1]-last[j])%mod)
#print(now)
print(now[-1])
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9778a8ca7ff30e47c535cb48b6e916ec98ae7099
|
c9dc1df17ecb9e279eb4403b83358363cdbe7fee
|
/project/urls.py
|
3c97f6855709a1835bcf8389736c05b2eba7bea8
|
[] |
no_license
|
m0nte-cr1st0/keyua
|
c3894a94c9bfe73409078be11cb1d3f64831054c
|
b964ebb7e260fbebdbc27e3a571fed6278196cac
|
refs/heads/master
| 2022-11-25T16:03:51.882386
| 2020-01-09T12:57:54
| 2020-01-09T12:57:54
| 232,809,529
| 0
| 0
| null | 2022-11-22T02:24:49
| 2020-01-09T12:58:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,534
|
py
|
"""IncheckSite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from project.landing.views import GoogleSiteVerificationView
urlpatterns = [
# WYSIWYG HTML editor
url(r'^tinymce/', include('tinymce.urls')),
url('keyua-admin/filebrowser/', admin.site.urls),
url('grappelli/', include('grappelli.urls')),
# API
url(r'^api/', include('project.api.urls')),
# Blog
url(r'^blog/', include('project.blog.urls')),
# admin side
url(r'^keyua-admin/', admin.site.urls),
#Other files
url(r'^googlef65a9f395670d60e.html/$', GoogleSiteVerificationView.as_view(), name='google-file'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# It's important to hold it on this place
urlpatterns += [
url(r'^', include('project.landing.urls')),
]
|
[
"dinamo.mutu111@gmail.com"
] |
dinamo.mutu111@gmail.com
|
dd23642f0663cc57638424411a54c21671bcc149
|
35a88ca38bb850b5c82d8a4e4de430d1d48660b7
|
/www_dytt8_net/www_dytt8_net/spiders/dytt8.py
|
b7eca5b30f78c900600914c17c3e5cbc47451ade
|
[] |
no_license
|
gyc567/spider_world
|
3798cf854efcaacc4918c82358836480e6245a11
|
4bf04e5a4b0578cd7a28c14f3c10f9a0cad63f7c
|
refs/heads/master
| 2020-05-22T02:14:12.210582
| 2019-05-07T12:34:06
| 2019-05-07T12:34:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,690
|
py
|
# -*- coding: utf-8 -*-
import re
import scrapy
import sys
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy.loader.processors import Compose
from www_dytt8_net.items import WwwDytt8NetItem
class Dytt8Spider(CrawlSpider):
__ERROR_INFO = "很抱歉,您要访问的页面已被删除或不存在。"
name = 'dytt8'
allowed_domains = ['www.dytt8.net']
start_urls = ['http://www.dytt8.net/']
rules = (
# follow every list page except the game section
Rule(LinkExtractor(deny=r'.*game.*', allow='.*/index\.html')),
# follow "next page" links
Rule(LinkExtractor(restrict_xpaths=u'//a[text()="下一页"]')),
# extract article pages (games filtered out) and hand them to parse_item
Rule(LinkExtractor(allow=r'.*/\d+/\d+\.html', deny=r".*game.*"), callback='parse_item', follow=True),
)
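# Parse a movie detail page: skip pages flagged as deleted, then pull the title,
# publish time, still images and ftp:// download links into a WwwDytt8NetItem.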
def parse_item(self, response):
if self.__ERROR_INFO in response.text:
return
item = WwwDytt8NetItem()
item['title'] = response.xpath('//div[@class="title_all"]/h1/font/text()').extract_first()
item['publish_time'] = response.xpath('//div[@class="co_content8"]/ul/text()').extract_first().strip().replace('发布时间:', '')
imgs_xpath = response.xpath('//div[@id="Zoom"]//img')
item['images'] = [i.xpath('./@src').extract_first() for i in imgs_xpath if i.xpath('./@src')]
item['download_links'] = re.compile('<a href="(ftp://.*?)">').findall(response.text)
item['contents'] = [i.strip().replace('\n', '').replace('\r', '') for i in response.xpath('string(//div[@id="Zoom"])').extract()]
yield item
|
[
"funblessu@gmail.com"
] |
funblessu@gmail.com
|
5cb10b21ff50792c7351d990d7a45c0414b624f8
|
8040c2be85e686df30143600ed91100b6094812a
|
/csv-file-operation/csvFileGen2.py
|
c3d7c7992c25a52c3e574525bb8070c6b63f51b7
|
[] |
no_license
|
eLtronicsVilla/Miscellaneous-Useful-code
|
0b6e3d6dd4b9feca3364f98929e26ee9da3221af
|
cc5be1e8b8e9b0d2f49f2abcba16b2548bf4a41e
|
refs/heads/master
| 2021-08-07T23:16:08.088315
| 2020-05-22T19:38:17
| 2020-05-22T19:38:17
| 181,491,048
| 0
| 0
| null | 2019-05-18T09:06:46
| 2019-04-15T13:19:55
|
Python
|
UTF-8
|
Python
| false
| false
| 703
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 11 16:23:34 2019
@author: brijesh Gupta
"""
import csv
import os
import sys
import time
with open('Test.csv', 'w', newline='') as csvfile:  # text mode with newline='' is required by csv.writer on Python 3
filewriter = csv.writer(csvfile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)
filewriter.writerow(['SN','Date','objectName','Path','imageName','No of Objects','Comments'])
todays_date = time.strftime("%Y-%m-%d %H:%M:%S")
SN = 0
no_of_objects = input("Enter the no of objects present in a file: ")
comments = input("Enter the comments on the current image: ")
filewriter.writerow([str(SN),str(todays_date),'Eye',str(os.getcwd()),str(sys.argv[0]),no_of_objects,comments])
csvfile.close()
|
[
"eltronicsvilla17@gmail.com"
] |
eltronicsvilla17@gmail.com
|
f09f51e942e96e258583d9deb9f7490ac54883aa
|
f3fdfdf714e23ef69c9ce6631c188f1ebc328546
|
/setup.py
|
a65c551e0c1b8aae6a58f0ff2a4edfb44f1e6111
|
[
"BSD-2-Clause"
] |
permissive
|
liujie40/PSpider
|
bf2a134812ce81357588b260cee9e3d039c73df0
|
f1162c777ec87250edfd2532882eb15b8d712e6a
|
refs/heads/master
| 2022-02-21T18:20:41.468852
| 2022-01-19T06:55:54
| 2022-01-19T06:56:00
| 112,547,656
| 1
| 0
| null | 2017-11-30T01:17:47
| 2017-11-30T01:17:47
| null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
# _*_ coding: utf-8 _*_
"""
install script: python3 setup.py install
"""
from setuptools import setup, find_packages
setup(
name="spider",
version="3.0.4",
author="xianhu",
keywords=["spider", "crawler"],
packages=find_packages(exclude=("test.*",)),
package_data={
"config": ["*.conf"], # include all *.conf files
},
install_requires=[]
)
|
[
"qixianhu@qq.com"
] |
qixianhu@qq.com
|
4380d953b1142157b79abdcc6ac89919e7e88fc9
|
90f545733f076747bad979faa3a8cf23867f7a3a
|
/HS5f.py
|
a49ff24bdca897a53c0c4690860b5d5d81f31ae2
|
[] |
no_license
|
kunweiTAN/techgym_ai
|
f85dc52ce6e75f4c08213d5796171908beb9a69e
|
051274bcc789a563c46ed5661301535e76ae1e18
|
refs/heads/master
| 2023-08-17T05:15:18.758183
| 2021-09-21T11:57:07
| 2021-09-21T11:57:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 775
|
py
|
#AI-TECHGYM-4-7-Q-1(AI-TECHGYM-3-20-Q-1)
# Regression and classification problems
# Imports
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
import pandas as pd
iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(
iris.data, iris.target, stratify = iris.target, random_state=0)
models = {
'AdaBoost': AdaBoostClassifier(),
'GradientBoost': GradientBoostingClassifier()
}
# Accuracy
scores = {}
for model_name, model in models.items():
model.fit(X_train, y_train)
scores[(model_name, 'train_score')] = model.score(X_train, y_train)
scores[(model_name, 'test_score')] = model.score(X_test, y_test)
# Display
display(pd.Series(scores).unstack())
|
[
"noreply@github.com"
] |
kunweiTAN.noreply@github.com
|
1cfebbe7fd51cc5599eba80d68db0e7cd7e7fbdd
|
2d694018e5f1ca0d8a60e2ecd3debc094a0ce9a2
|
/venv/Scripts/autopep8-script.py
|
03573f16b86b67d8a2fcfee0dfb8e380b465a0ce
|
[] |
no_license
|
gajanantadas/Ecommerce_project
|
9a380fd5c0c37440b3d48982a9aac742d6831d2a
|
0b0251d30f8a10a79f72cc8dfb2780d99e62fe05
|
refs/heads/master
| 2023-03-05T03:51:02.690861
| 2021-02-15T07:43:34
| 2021-02-15T07:43:34
| 338,998,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
#!"G:\New folder\Ecommerce_project\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'autopep8==1.5.4','console_scripts','autopep8'
__requires__ = 'autopep8==1.5.4'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('autopep8==1.5.4', 'console_scripts', 'autopep8')()
)
|
[
"gajanantadas3@gmail.com"
] |
gajanantadas3@gmail.com
|
d74d978814589cb360e60156707fb640d5ac8a75
|
6b587069460046cefbb3f2d18bafbbe4ffbc00d1
|
/further_study.py
|
74b6ba5f552ad93ec1ae9135307eced4fe91d5bd
|
[] |
no_license
|
rachel-lynch-lin/list-slicing
|
bd992c84ff5865d2b9010e80f9fc01648f161906
|
d598df20941f34a6e993b73897cfada38f52ca88
|
refs/heads/master
| 2020-03-21T06:40:38.214931
| 2018-06-22T00:06:16
| 2018-06-22T00:06:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,103
|
py
|
"""Custom implementations of several standard Python list methods.
Implement your own versions of Python's standard list methods, as functions.
You should use only the primitive operations from Part 1 and 2 in your
implementations. For loops are also allowed, such as the following:
for element in some_list:
# Do something with element
Each function imitates a built-in list method, as described by the docstring
for each function.
Play with the built-in methods in the Python REPL to get a feel
for how they work before trying to write your custom version.
"""
from list_operations import *
def custom_len(input_list):
"""Return number of items in the list.
The function custom_len(input_list) should have
the same functionality and result as len(input_list).
For example:
>>> custom_len(['Do', 'Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do'])
8
"""
counter = 0
for i in input_list:
counter += 1
return counter
# For the next four exercises, you'll need to be clever and think about ways
# to use list slice assignment.
#
# NOTE: these are especially contrived. You wouldn't really want
# to typically append things to a list like this (you'd want to use the
# list.append() method), but we want you to practice list slicing assignment
# in different ways so it sticks in your brain.
def custom_append(input_list, value):
"""Add the value to the end of the list.
The function custom_append(input_list, value) should have the same
functionality as input_list.append(value) where value is added to the
end of the list and the function returns nothing.
For example:
>>> notes = ['Do', 'Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do']
>>> custom_append(notes, 'Re')
>>> notes == ['Do', 'Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do', 'Re']
True
"""
input_list[len(input_list):] = [value]
def custom_extend(input_list, second_list):
"""Append every item in second_list to input_list.
Like input_list.extend(second_list), custom_extend(input_list, second_list)
should append every item in the second list to the end of the first list
and return nothing.
For example:
>>> months = ['Jan', 'Feb', 'Mar']
>>> custom_extend(months, ['Apr', 'May'])
>>> months == ['Jan', 'Feb', 'Mar', 'Apr', 'May']
True
"""
input_list[len(input_list):] = second_list
def custom_insert(input_list, index, value):
"""Insert value at index in the list.
Like input_list.insert(index, value), should insert (not replace) the value
at the specified index of the input list and return nothing.
For example:
>>> months = ['Jan', 'Mar']
>>> custom_insert(months, 1, 'Feb')
>>> months == ['Jan', 'Feb', 'Mar']
True
"""
input_list[index:index] = [value]
def custom_remove(input_list, value):
"""Remove the first item of the value in list.
The function custom_remove(input_list, value) should have the same
functionality as input_list.remove(value) where the first item of
the value specified is removed and the function returns nothing.
For example:
>>> notes = ['Do', 'Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do']
>>> custom_remove(notes, 'Do')
>>> notes == ['Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do']
True
"""
for i in range(len(input_list)):
if input_list[i] == value:
del input_list[i]
break
def custom_pop(input_list):
"""Remove the last item in the list and returns it.
The function custom_pop(input_list) should have the same functionality
and result as input_list.pop().
For example:
>>> months = ['Jan', 'Feb', 'March']
>>> custom_pop(months)
'March'
>>> months
['Jan', 'Feb']
"""
last_item = input_list[-1]
del input_list[-1]
return last_item
def custom_index(input_list, value):
"""Return the index of the first item of value found in input_list.
The function custom_index(input_list, value) should have the same
functionality and result as input_list.index(value).
For example:
>>> custom_index(['Do', 'Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do'], 'Re')
1
"""
for i in range(len(input_list)):
if input_list[i] == value:
return i
def custom_count(input_list, value):
"""Return the number of times value appears in the list.
Like input_list.count(value), custom_count(input_list, value) should
return the number of times the specified value appears in the list.
For example:
>>> custom_count(['Do', 'Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do'], 'Do')
2
"""
count = 0
for i in range(len(input_list)):
if input_list[i] == value:
count += 1
return count
def custom_reverse(input_list):
"""Reverse the elements of the input_list.
Like input_list.reverse(), custom_reverse(input_list) should reverse the
elements of the original list and return nothing (we call this reversing
"in place").
For example:
>>> multiples = [0, 3, 6, 9, 12, 15, 18, 21, 24, 27]
>>> custom_reverse(multiples)
>>> multiples == [27, 24, 21, 18, 15, 12, 9, 6, 3, 0]
True
"""
for i in range(len(input_list)):
input_list[i:i] = [input_list[-1]]
del input_list[-1]
def custom_contains(input_list, value):
"""Return True or False if value is in the input_list.
Like (value in input_list), should return True if the list contains the
specified value and False if it does not. Remember, do not use the `if X in Y`
statement -- find another way to solve it!
For example:
>>> custom_contains([0, 3, 6, 9, 12, 15, 18, 21, 24], 23)
False
>>> custom_contains([0, 3, 6, 9, 12, 15, 18, 21, 24], 24)
True
"""
for num in input_list:
if num == value:
return True
return False
def custom_equality(some_list, another_list):
"""Return True if passed lists are identical, False otherwise.
Like (some_list == another_list), custom_equality(some_list, another_list)
should return True if both lists contain the same values in the same indexes.
For example:
>>> custom_equality(['Jan', 'Feb', 'Mar'], ['Jan', 'Feb', 'Mar'])
True
>>> custom_equality(['Jan', 'Feb', 'Mar'], ['Jan', 'Mar', 'Feb'])
False
"""
if custom_len(some_list) != custom_len(another_list): return False
for i in range(len(some_list)):
if some_list[i] == another_list[i]:
continue
else:
return False
return True
##############################################################################
# Please ask for a code review. Also, give your partner a high-five!
##############################################################################
# This is the part were we actually run the doctests.
if __name__ == "__main__":
import doctest
result = doctest.testmod()
if result.failed == 0:
print("ALL TESTS PASSED")
|
[
"no-reply@hackbrightacademy.com"
] |
no-reply@hackbrightacademy.com
|
839bbb95ff3a972b5ab6d75ef01bd4339081612a
|
64bf39b96a014b5d3f69b3311430185c64a7ff0e
|
/intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/inspur/sm/plugins/modules/download_auto_screenshot.py
|
7c8f830ec5e4a390bc12d58743e4817959ff16e9
|
[
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] |
permissive
|
SimonFangCisco/dne-dna-code
|
7072eba7da0389e37507b7a2aa5f7d0c0735a220
|
2ea7d4f00212f502bc684ac257371ada73da1ca9
|
refs/heads/master
| 2023-03-10T23:10:31.392558
| 2021-02-25T15:04:36
| 2021-02-25T15:04:36
| 342,274,373
| 0
| 0
|
MIT
| 2021-02-25T14:39:22
| 2021-02-25T14:39:22
| null |
UTF-8
|
Python
| false
| false
| 2,448
|
py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Copyright (C) 2020 Inspur Inc. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: download_auto_screenshot
version_added: "0.1.0"
author:
- WangBaoshan (@ISIB-group)
short_description: Download auto screenshots.
description:
- Download auto screenshots on Inspur server.
options:
file_url:
description:
- Screen capture file path.
type: str
required: true
extends_documentation_fragment:
- inspur.sm.ism
'''
EXAMPLES = '''
- name: Screen test
hosts: ism
connection: local
gather_facts: no
vars:
ism:
host: "{{ ansible_ssh_host }}"
username: "{{ username }}"
password: "{{ password }}"
tasks:
- name: "Download auto screenshots"
inspur.sm.download_auto_screenshot:
file_url: "/home/wbs/screen"
provider: "{{ ism }}"
'''
RETURN = '''
message:
description: Messages returned after module execution.
returned: always
type: str
state:
description: Status after module execution.
returned: always
type: str
changed:
description: Check to see if a change was made on the device.
returned: always
type: bool
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.inspur.sm.plugins.module_utils.ism import (ism_argument_spec, get_connection)
class Screen(object):
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
self.results = dict()
def init_module(self):
"""Init module object"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=False)
def run_command(self):
self.module.params['subcommand'] = 'downscreen'
self.results = get_connection(self.module)
def show_result(self):
"""Show result"""
self.module.exit_json(**self.results)
def work(self):
"""Worker"""
self.run_command()
self.show_result()
def main():
argument_spec = dict(
file_url=dict(type='str', required=True),
)
argument_spec.update(ism_argument_spec)
screen_obj = Screen(argument_spec)
screen_obj.work()
if __name__ == '__main__':
main()
|
[
"sifang@cisco.com"
] |
sifang@cisco.com
|
e4bf678fe219d6b3554fa9948f51af523385ce03
|
b2b79cc61101ddf54959b15cf7d0887d114fb4e5
|
/web/pgadmin/tools/debugger/tests/test_restart_debugger.py
|
6c30562bf91dcb925a73c5ff388af96cf835ee8a
|
[
"PostgreSQL"
] |
permissive
|
99Percent/pgadmin4
|
8afe737eb2ec1400ab034ad1d8a4f7c4ba4c35c8
|
5e0c113c7bc4ffefbec569e7ca5416d9acf9dd8a
|
refs/heads/master
| 2021-10-10T20:08:48.321551
| 2021-09-30T12:51:43
| 2021-09-30T12:51:43
| 165,702,958
| 0
| 0
|
NOASSERTION
| 2019-01-14T17:18:40
| 2019-01-14T17:18:39
| null |
UTF-8
|
Python
| false
| false
| 3,016
|
py
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2021, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import uuid
from pgadmin.utils.route import BaseTestGenerator
from regression.python_test_utils import test_utils as utils
from . import utils as debugger_utils
from unittest.mock import patch
from regression import parent_node_dict
from pgadmin.browser.server_groups.servers.databases.schemas.functions \
.tests import utils as funcs_utils
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as db_utils
class RestartDebugger(BaseTestGenerator):
""" This class will Restart the debugger """
scenarios = utils.generate_scenarios('restart_debugger',
debugger_utils.test_cases)
def setUp(self):
super(RestartDebugger, self).setUp()
self.schema_data = parent_node_dict['schema'][-1]
self.server_id = self.schema_data['server_id']
self.db_id = self.schema_data['db_id']
self.schema_id = self.schema_data['schema_id']
local_self = funcs_utils.set_up(self)
func_name = "test_function_%s" % str(uuid.uuid4())[1:8]
function_info = funcs_utils.create_function(
local_self.server, local_self.db_name, local_self.schema_name,
func_name)
self.func_id = function_info[0]
if self.add_extension:
debugger_utils.add_extension(self, utils, db_utils=db_utils)
init_debugger = debugger_utils.init_debugger_function(self)
self.trans_id = json.loads(init_debugger.data)['data']['trans_id']
if self.init_target:
debugger_utils.initialize_target(self, utils)
def restart_debugger(self):
return self.tester.get(
self.url + str(self.trans_id),
content_type='application/json')
def runTest(self):
"""
This function will initialize the debugger for function and procedures.
"""
if self.is_positive_test:
response = self.restart_debugger()
else:
if self.mocking_required:
with patch(self.mock_data["function_name"],
return_value=eval(self.mock_data["return_value"])):
response = self.restart_debugger()
else:
response = self.restart_debugger()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
self.assertEqual(actual_response_code, expected_response_code)
def tearDown(self):
"""This function delete the server from SQLite """
debugger_utils.close_debugger(self)
debugger_utils.delete_function(self, utils)
db_utils.disconnect_database(self, self.server_id, self.db_id)
|
[
"akshay.joshi@enterprisedb.com"
] |
akshay.joshi@enterprisedb.com
|
efda514ddc0e46df56878a94c5569a740445e0fb
|
d429c131df32789e11a98e9e965e652176fcee97
|
/454B - Little Pony and Sort by Shift.py
|
8dcfd32741c9cd4e29c47a0c82bd29701b71a480
|
[] |
no_license
|
shan-mathi/Codeforces
|
a11841a1ef1a1ef78e3d506d58d9fdf4439421bd
|
6f8166b79bea0eb1f575dbfc74c252ba71472c7e
|
refs/heads/main
| 2023-06-15T08:25:41.130432
| 2021-06-24T10:36:06
| 2021-06-24T10:36:06
| 341,176,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 546
|
py
|
#118706045 Jun/07/2021 14:53UTC+5.5 Shan_XD 454B - Little Pony and Sort by Shift PyPy 3 Accepted 218 ms
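# Return the number of unit shifts (last element moved to the front) needed to sort x,
# or -1 if no number of shifts sorts it.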
def one_shift(n,x):
c = x.copy()
c.sort()
if c ==x:
return 0
set =0
for i in range(n):
if x[i+1] < x[i] and not set:
set = 1
if (x[i+1:] + x[:i+1])== c:
return n-i-1
else:
return -1
if x[i+1] < x[i] and set:
return -1
return n - i -1
n = int(input())
x = list(map(int, input().split()))
print(one_shift(n,x))
|
[
"noreply@github.com"
] |
shan-mathi.noreply@github.com
|
f0d75c548356b509d1ce973bd3524ff051486610
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part004907.py
|
f15c185acaf516f2e92f4364b40ff14c731692d7
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher80951(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.4.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.4.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher80951._instance is None:
CommutativeMatcher80951._instance = CommutativeMatcher80951()
return CommutativeMatcher80951._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 80950
return
yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
f79e2cae087ea96ee7bf206c8c58102f233d61a4
|
aadad415f425b9f45fed14290235488a46687a4f
|
/2009/bastieri/cadmio/passosbagliato/wavelength.py
|
54e7ed3023e3d79258a794b356218a90c4540593
|
[] |
no_license
|
enucatl-university/lab-unipd
|
c1fdae198ccc3af3f75ad07554e148427a9cc096
|
c197bb92f479913c1183375fa22fd1619e6bbad4
|
refs/heads/master
| 2023-08-15T01:59:55.502505
| 2016-11-11T19:20:13
| 2016-11-11T19:20:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,201
|
py
|
from __future__ import division
import math
from ROOT import TGraph, TF1, TCanvas
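# Convert an angle written as degrees.arcminutes (e.g. 12.30 meaning 12 deg 30') to decimal degrees.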
def to_decimal(deg):
int = math.floor(deg)
rem = 5*(deg - int) / 3
return int + rem
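# Read (order, angle1, angle2) rows, write "order sin(angle)" pairs to a file, fit a straight line,
# and take |wavelength / slope| as the grating separation.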
class WaveLength(object):
def __init__(self, file_name):
self.output_file = file_name + '.out'
with open(file_name) as input_file:
with open(self.output_file, 'w') as output:
for line in input_file:
o, n1, n2 = [float(x) for x in line.split()]
n1, n2 = to_decimal(n1), to_decimal(n2)
angle = ((n1 - 180) + n2)*math.pi/360
sine = math.sin(angle)
out_string = str(o) + ' ' + str(sine) + '\n'
output.write(out_string)
def fit_graph(self):
self.graph = TGraph(self.output_file)
self.func = TF1('line', 'pol1', -6, 6)
self.graph.Fit('line', 'QW')
self.slope = self.func.GetParameter(1)
#canv = TCanvas('can', 'can')
self.graph.SetMarkerStyle(8)
#self.graph.Draw('AP')
def get_separation(self, wavelen):
self.separation = math.fabs(wavelen / self.slope)
return self.separation
|
[
"gmatteo..abis@gmail.com"
] |
gmatteo..abis@gmail.com
|
522f43d045aead4510090ccba73165183d45dd2a
|
a3cf848e37683b45ea570578e398ab85f1ca4732
|
/DEMO/write_excel.py
|
c874f2fbbb1796ab96bf428e6653507c492782df
|
[
"MIT"
] |
permissive
|
AceCoooool/python-example
|
7f456f702ecc59909d500bcf62e478d0a86082de
|
1d0068627210f08d31f027b6a333118d9f743956
|
refs/heads/master
| 2020-04-18T07:27:45.200465
| 2019-02-24T11:22:26
| 2019-02-24T11:22:26
| 167,360,679
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
import xlwt
if __name__ == '__main__':
# Workbook is created
wb = xlwt.Workbook()
# add_sheet is used to create sheet.
sheet1 = wb.add_sheet('Sheet 1')
sheet1.write(1, 0, 'ISBT DEHRADUN')
sheet1.write(2, 0, 'SHASTRADHARA')
sheet1.write(3, 0, 'CLEMEN TOWN')
sheet1.write(4, 0, 'RAJPUR ROAD')
sheet1.write(5, 0, 'CLOCK TOWER')
sheet1.write(0, 1, 'ISBT DEHRADUN')
sheet1.write(0, 2, 'SHASTRADHARA')
sheet1.write(0, 3, 'CLEMEN TOWN')
sheet1.write(0, 4, 'RAJPUR ROAD')
sheet1.write(0, 5, 'CLOCK TOWER')
wb.save('../data/csv/example.xls')
|
[
"tinyshine@yeah.net"
] |
tinyshine@yeah.net
|
ecb2639a5a72d1cd7a09a16340dc5cfff6926757
|
55eda01bdcbda99f72cfdf0b29afb5ea36756873
|
/arxiv/kdgan/mdlcompr_xw/train_kd.py
|
5fac90bbad9ad11f7d44ec75643db75b3b861de5
|
[] |
no_license
|
yyht/KDGAN
|
7489a0ca1a2f044b6bcb7cd8bb0d6f2dae1da5e7
|
8f1367d242d7d174bf5bb2740aa18e3846d7b521
|
refs/heads/master
| 2020-05-16T08:36:18.872239
| 2019-01-12T04:17:31
| 2019-01-12T04:17:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,738
|
py
|
from kdgan import config
from kdgan import metric
from kdgan import utils
from flags import flags
from data_utils import AffineGenerator
from gen_model import GEN
from tch_model import TCH
import data_utils
from os import path
from tensorflow.contrib import slim
import math
import os
import time
import numpy as np
import tensorflow as tf
mnist = data_utils.read_data_sets(flags.dataset_dir,
one_hot=True,
train_size=flags.train_size,
valid_size=flags.valid_size,
reshape=True)
datagen = AffineGenerator(mnist)
tn_size, vd_size = mnist.train.num_examples, mnist.test.num_examples
print('tn size=%d vd size=%d' % (tn_size, vd_size))
tn_num_batch = int(flags.num_epoch * tn_size / flags.batch_size)
print('tn #batch=%d' % (tn_num_batch))
eval_interval = int(tn_size / flags.batch_size)
print('ev #interval=%d' % (eval_interval))
tn_gen = GEN(flags, mnist.train, is_training=True)
tn_tch = TCH(flags, mnist.train, is_training=True)
scope = tf.get_variable_scope()
scope.reuse_variables()
vd_gen = GEN(flags, mnist.test, is_training=False)
vd_tch = TCH(flags, mnist.test, is_training=False)
tf.summary.scalar(tn_gen.learning_rate.name, tn_gen.learning_rate)
tf.summary.scalar(tn_gen.kd_loss.name, tn_gen.kd_loss)
summary_op = tf.summary.merge_all()
init_op = tf.global_variables_initializer()
tot_params = 0
for variable in tf.trainable_variables():
num_params = 1
for dim in variable.shape:
num_params *= dim.value
print('%-50s (%d params)' % (variable.name, num_params))
tot_params += num_params
print('%-50s (%d params)' % (' '.join(['kd', flags.kd_model]), tot_params))
def main(_):
bst_acc, bst_epk = 0.0, 0
writer = tf.summary.FileWriter(config.logs_dir, graph=tf.get_default_graph())
with tf.train.MonitoredTrainingSession() as sess:
sess.run(init_op)
tn_gen.saver.restore(sess, flags.gen_model_ckpt)
tn_tch.saver.restore(sess, flags.tch_model_ckpt)
ini_gen = metric.eval_mdlcompr(sess, vd_gen, mnist)
ini_tch = metric.eval_mdlcompr(sess, vd_tch, mnist)
start = time.time()
# for tn_batch in range(tn_num_batch):
# tn_image_np, tn_label_np = mnist.train.next_batch(flags.batch_size)
tn_batch = -1
for epoch in range(flags.num_epoch):
for tn_image_np, tn_label_np in datagen.generate(batch_size=flags.batch_size):
tn_batch += 1
feed_dict = {vd_tch.image_ph:tn_image_np}
soft_logit_np, = sess.run([vd_tch.logits], feed_dict=feed_dict)
feed_dict = {
tn_gen.image_ph:tn_image_np,
tn_gen.hard_label_ph:tn_label_np,
tn_gen.soft_logit_ph:soft_logit_np,
}
_, summary = sess.run([tn_gen.kd_update, summary_op], feed_dict=feed_dict)
writer.add_summary(summary, tn_batch)
if (tn_batch + 1) % eval_interval != 0:
continue
feed_dict = {
vd_gen.image_ph:mnist.test.images,
vd_gen.hard_label_ph:mnist.test.labels,
}
acc = sess.run(vd_gen.accuracy, feed_dict=feed_dict)
if acc > bst_acc:
bst_acc = max(acc, bst_acc)
bst_epk = epoch
tot_time = time.time() - start
global_step = sess.run(tn_gen.global_step)
avg_time = (tot_time / global_step) * (tn_size / flags.batch_size)
print('#%08d curacc=%.4f curbst=%.4f tot=%.0fs avg=%.2fs/epoch' %
(tn_batch, acc, bst_acc, tot_time, avg_time))
if acc <= bst_acc:
continue
# save gen parameters if necessary
tot_time = time.time() - start
ini_gen *= 100
bst_acc *= 100
bst_epk += 1
print('#mnist=%d %s@%d=%.2f iniacc=%.2f et=%.0fs' %
(tn_size, flags.kd_model, bst_epk, bst_acc, ini_gen, tot_time))
if __name__ == '__main__':
tf.app.run()
|
[
"xiaojiew1@student.unimelb.edu.au"
] |
xiaojiew1@student.unimelb.edu.au
|
03f83b3a694301dea1d55fc6a15c0c9f2974f189
|
c67f2d0677f8870bc1d970891bbe31345ea55ce2
|
/zippy/lib-python/3/test/test_pep263.py
|
598d980b2a67e289dfefb23de2265ee229c13b0e
|
[
"BSD-3-Clause"
] |
permissive
|
securesystemslab/zippy
|
a5a1ecf5c688504d8d16128ce901406ffd6f32c2
|
ff0e84ac99442c2c55fe1d285332cfd4e185e089
|
refs/heads/master
| 2022-07-05T23:45:36.330407
| 2018-07-10T22:17:32
| 2018-07-10T22:17:32
| 67,824,983
| 324
| 27
| null | null | null | null |
WINDOWS-1252
|
Python
| false
| false
| 1,852
|
py
|
# -*- coding: koi8-r -*-
import unittest
from test import support
class PEP263Test(unittest.TestCase):
def test_pep263(self):
self.assertEqual(
"ðÉÔÏÎ".encode("utf-8"),
b'\xd0\x9f\xd0\xb8\xd1\x82\xd0\xbe\xd0\xbd'
)
self.assertEqual(
"\ð".encode("utf-8"),
b'\\\xd0\x9f'
)
def test_compilestring(self):
# see #1882
c = compile(b"\n# coding: utf-8\nu = '\xc3\xb3'\n", "dummy", "exec")
d = {}
exec(c, d)
self.assertEqual(d['u'], '\xf3')
def test_issue2301(self):
try:
compile(b"# coding: cp932\nprint '\x94\x4e'", "dummy", "exec")
except SyntaxError as v:
self.assertEqual(v.text, "print '\u5e74'\n")
else:
self.fail()
def test_issue4626(self):
c = compile("# coding=latin-1\n\u00c6 = '\u00c6'", "dummy", "exec")
d = {}
exec(c, d)
self.assertEqual(d['\xc6'], '\xc6')
def test_issue3297(self):
c = compile("a, b = '\U0001010F', '\\U0001010F'", "dummy", "exec")
d = {}
exec(c, d)
self.assertEqual(d['a'], d['b'])
self.assertEqual(len(d['a']), len(d['b']))
self.assertEqual(ascii(d['a']), ascii(d['b']))
def test_issue7820(self):
# Ensure that check_bom() restores all bytes in the right order if
# check_bom() fails in pydebug mode: a buffer starts with the first
# byte of a valid BOM, but next bytes are different
# one byte in common with the UTF-16-LE BOM
self.assertRaises(SyntaxError, eval, b'\xff\x20')
# two bytes in common with the UTF-8 BOM
self.assertRaises(SyntaxError, eval, b'\xef\xbb\x20')
def test_main():
support.run_unittest(PEP263Test)
if __name__=="__main__":
test_main()
|
[
"thezhangwei@gmail.com"
] |
thezhangwei@gmail.com
|
b73c82ceb84172586c6092a5ce99dceb1c4cfeb1
|
fe06311a7de13a02ca0be37d84c542c3cece3f33
|
/Chapter35/file_35_1_3b.py
|
ea3649be523e31c869c950e58a5182dc36a0424f
|
[] |
no_license
|
mooksys/Python_Algorithms
|
a4a84ddabc34ec4b7cc0ac01d55019880af38514
|
375817e3dfdec94411cf245fe3f685a69d92b948
|
refs/heads/master
| 2020-08-24T06:35:05.791979
| 2018-07-30T01:22:24
| 2018-07-30T01:22:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
ELEMENTS_OF_A = 20
ELEMENTS_OF_B = 30
# 입력받은 값을 리스트 a와 b에 각각 저장한다.
a = [None] * ELEMENTS_OF_A
b = [None] * ELEMENTS_OF_B
for i in range(ELEMENTS_OF_A):
a[i] = float(input())
for i in range(ELEMENTS_OF_B):
b[i] = float(input())
# 리스트 new_arr를 생성한다.
new_arr = []
for element in a:
new_arr.append(element)
for element in b:
new_arr.append(element)
# 리스트 new_arr를 출력한다.
for element in new_arr:
print(element, end = "\t")
|
[
"jeipubmanager@gmail.com"
] |
jeipubmanager@gmail.com
|
f8066949fde26242a622104d46dbf942a6148195
|
78f3fe4a148c86ce9b80411a3433a49ccfdc02dd
|
/2012 and earlier/superdonors-20121023/graphic_config.py
|
f8301f5d3df1c9e94821f63c2824fad43e9647b1
|
[] |
no_license
|
nprapps/graphics-archive
|
54cfc4d4d670aca4d71839d70f23a8bf645c692f
|
fe92cd061730496cb95c9df8fa624505c3b291f8
|
refs/heads/master
| 2023-03-04T11:35:36.413216
| 2023-02-26T23:26:48
| 2023-02-26T23:26:48
| 22,472,848
| 16
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
#!/usr/bin/env python
import base_filters
COPY_GOOGLE_DOC_KEY = '1o6fwDxOQI70FxDnK7_PaXad8viYcTkY7Q1a_B6uzdlE'
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
|
[
"ahurt@npr.org"
] |
ahurt@npr.org
|
7a047c3eaac3fc87eeeb8acc8bd41efee40e159c
|
897871d09b8b1e86c5a48599839ba9534260f2c9
|
/aromawine3-new_update__with_checkout/admin_manage_notification/admin.py
|
d94ca7e5b51a57bff11127a6e62b3e27e9e735ab
|
[] |
no_license
|
sidkushwah123/wine
|
0b8b8fdf44068b4488d5f1ae5d34a24d3fff19a9
|
bb29e84fb4a0709aca36e819ae6191147a9691b5
|
refs/heads/main
| 2023-07-27T14:03:06.814484
| 2021-09-11T15:25:39
| 2021-09-11T15:25:39
| 405,354,798
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
from django.contrib import admin
from .models import AwNotification
from import_export.admin import ImportExportModelAdmin
# Register your models here.
class AwNotificationAdmin(ImportExportModelAdmin):
    list_display = ('user', 'Send_Status', 'Created_by', 'Created_date', 'Read_Status', 'Read_date')
admin.site.register(AwNotification,AwNotificationAdmin)
|
[
"sachinkushwah0007@gmail.com"
] |
sachinkushwah0007@gmail.com
|
b23ab19206208ca963896607269c13453e188470
|
0d61f90e3a7877e91d72fed71b0895c7070dc046
|
/final_project/.history/project/menu_app/urls_20210103102257.py
|
433c7dab24301af7a57b5a070851582c545d4fcd
|
[] |
no_license
|
lienusrob/final_project
|
44d7d90dc0b7efc0cf55501549a5af0110d09b3b
|
4164769626813f044ec2af3e7842514b5699ef77
|
refs/heads/master
| 2023-02-10T16:36:33.439215
| 2021-01-05T09:34:01
| 2021-01-05T09:34:01
| 325,002,104
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 898
|
py
|
# from project.account_app.views import HomeView
from django.urls import path, re_path
from . import views
from .views import view_cart, add_to_cart, adjust_cart
urlpatterns = [
#path('', views.menu_list_view(template_name = 'menu_app/menu_list.html'), name = 'menu_list'),
#path('menu/', views.menu_list_view, name = 'menu_list'),
#path ('', views.menu_category, name = 'menu_category'),
# path ('admin_page/', views.MenuItem, name = 'menu_item'),
# path ('', views.home, name = "home"),
# path ('cart/', views.cart, name = "cart"),
# path ('<str:name>/', views.menu_details, name = 'menu_details'),
path('', views.home, name="home"),
path('cart/', views.cart, name="cart"),
path('<str:name>/', views.menu_details, name="menu_details"),
    re_path(r'^$', view_cart, name='view_cart'),
    re_path(r'^add/(?P<id>\d+)', add_to_cart, name='add_to_cart'),
    re_path(r'^adjust/(?P<id>\d+)', adjust_cart, name='adjust_cart'),
]
|
[
"lienus.rob@hotmail.de"
] |
lienus.rob@hotmail.de
|
80cecf6b14e008ae28b91dce83556e31dd9fb1a7
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_81/507.py
|
bdd84a38baf779ae289c6f1817d6c5003b53e93f
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,592
|
py
|
fp = open('A-large.in')
T = int(fp.readline())
for t in range(T):
N = int(fp.readline())
M = []
for n in range(N):
s = fp.readline().strip()
M.append(s)
print "Case #%d:"%(t+1)
WP = []
for n in range(N):
playcount = 0
wins = 0.0
wp = 0.0
for i in range(len(M[n])):
if M[n][i] == '0' :
playcount += 1
if M[n][i] == '1' :
playcount += 1
wins += 1
if playcount != 0: wp = wins/playcount
# print playcount, wins, (wins/playcount)
WP.append(wp)
OWP = []
for n in range(N):
owp = 0.0
wpcount = 0.0
for j in range(N):
if M[j][n] != '.':
playcount = 0
wins = 0.0
wp = 0.0
for i in range(len(M[n])):
if (M[j][i] == '0') and (i != n):
playcount += 1
if (M[j][i] == '1') and (i != n):
playcount += 1
wins += 1
if playcount != 0:
wp = wins/playcount
wpcount +=1
owp += wp
OWP.append(owp/wpcount)
OOWP = []
for n in range(N):
oowp = 0.0
count = 0.0
for i in range(N):
if M[n][i] != '.':
oowp += OWP[i]
count += 1
OOWP.append(oowp/count)
for n in range(N):
RPI = 0.25 * WP[n] + 0.50 * OWP[n] + 0.25 * OOWP[n]
print RPI
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
d8b174c28eccb7c63f64135fafc8e975c5d0d119
|
fd574c2d37afd8fddc548f64aa8befdfcf96d43a
|
/greedy/queueByHeight.py
|
bcee8e588c9ce1d64f6ecc744310778402e743e9
|
[] |
no_license
|
g10guang/LeetCode
|
023f1630611b05edf0ba069adf383e86db66c3f3
|
da2a75c3c4a853b4768ae03bab7725e11cf38d1a
|
refs/heads/master
| 2021-07-05T15:29:03.007752
| 2020-07-15T14:32:10
| 2020-07-15T14:32:10
| 129,694,867
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 758
|
py
|
# https://leetcode.com/problems/queue-reconstruction-by-height/description/
class Solution:
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
if not people:
return people
length = len(people)
i = length - 1
l = sorted(people, key=lambda x: x[0] * 1000 + x[1])
while i >= 0 and l[-1][0] == l[i][0]:
i -= 1
while i >= 0:
pos = l[i][1]
k = i - 1
while k >= 0 and l[k][0] == l[i][0]:
pos -= 1
k -= 1
for j in range(pos):
t = i+j
l[t+1], l[t] = l[t], l[t+1]
i -= 1
return l
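# Editor's note: a small self-check added for illustration; the input below is
# the well-known example for this problem and is not part of the original file.
if __name__ == "__main__":
    people = [[7, 0], [4, 4], [7, 1], [5, 0], [6, 1], [5, 2]]
    # Expected reconstruction: [[5,0],[7,0],[5,2],[6,1],[4,4],[7,1]]
    print(Solution().reconstructQueue(people))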
|
[
"g10guang@gmail.com"
] |
g10guang@gmail.com
|
747653f0003ab3203563cf84d3100573691845ae
|
20b04495f17e7cb9970feffb92eb2b2fc05289a3
|
/sample/ThreadPy.py
|
7bf1df16327b09d783ffb6cae3f622729d8c4e3a
|
[] |
no_license
|
to-yuki/pythonLab
|
4a69e5f24df86005f32bda0e41ddfd23a2942335
|
270487690818faa90d5c17e6619d0b531f9a4f39
|
refs/heads/master
| 2021-09-09T23:59:25.938777
| 2018-03-20T09:26:18
| 2018-03-20T09:26:18
| 108,782,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 711
|
py
|
# -*- coding: UTF-8 -*-
import threading
from time import sleep
# Function 1 to be run in a thread
def thread1():
    # Print "T1" every 1 second
for i in range(5):
sleep(1)
print "T1 ",
# Function 2 to be run in a thread
def thread2():
    # Print "T2" every 2 seconds
for i in range(5):
sleep(2)
print "T2 ",
# Main thread function
def mainThread():
    # Create the thread objects
t1 = threading.Thread(target=thread1)
t2 = threading.Thread(target=thread2)
    # Start the created thread objects
t1.start()
t2.start()
# Call the main thread function
if __name__=='__main__':
mainThread()
|
[
"to-yuki@jtp.co.jp"
] |
to-yuki@jtp.co.jp
|
d033b899c9e461cf8c7d511f8aea043dfb973638
|
5af277b5819d74e61374d1d78c303ac93c831cf5
|
/grouptesting/utils.py
|
8a680f40932fb818fdc48333c6f8d26210eaa058
|
[
"Apache-2.0"
] |
permissive
|
Ayoob7/google-research
|
a2d215afb31513bd59bc989e09f54667fe45704e
|
727ec399ad17b4dd1f71ce69a26fc3b0371d9fa7
|
refs/heads/master
| 2022-11-11T03:10:53.216693
| 2020-06-26T17:13:45
| 2020-06-26T17:13:45
| 275,205,856
| 2
| 0
|
Apache-2.0
| 2020-06-26T16:58:19
| 2020-06-26T16:58:18
| null |
UTF-8
|
Python
| false
| false
| 2,223
|
py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Some useful array manipulations for sampling."""
import jax
import jax.numpy as np
def unique(rng, binary_vectors):
"""Computes the number of unique binary columns."""
alpha = jax.random.normal(rng, shape=((1, binary_vectors.shape[1])))
return 1 + np.count_nonzero(
np.diff(np.sort(np.sum(binary_vectors * alpha, axis=-1))))
def select_from_sizes(values, sizes):
"""Selects using indices group_sizes the relevant values for a parameter.
Given a parameter vector (or possibly constant) that describes values
for groups of size 1,2,...., k_max selects values according to vector
group_sizes. When an item in group_sizes is larger than the size of
the vector, we revert to the last element of the vector by default.
Note that the values array is 0-indexed, therefore the values corresponding
to size 1 is values[0], to size 2 values[1] and more generally, the value for
a group of size i is values[i-1].
Args:
values: a np.ndarray that can be of size 1 or more, from which to seleect
the values from.
sizes: np.array[int] representing the group sizes we want to extract the
values of.
Returns:
vector of parameter values, chosen at corresponding group sizes,
of the same size of group_sizes.
Raises:
ValueError when the size array is not one dimensional.
"""
dim = np.ndim(values)
if dim > 1:
raise ValueError(f"sizes argument has dimension {dim} > 1.")
# The values are 0-indexed, but sizes are strictly positives.
indices = np.minimum(sizes, np.size(values)) - 1
return np.squeeze(values[list(indices)])
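# Editor's note (hypothetical usage sketch, not part of the original module):
# with values = np.array([0.1, 0.2, 0.3]) holding parameters for group sizes
# 1, 2 and 3, select_from_sizes(values, np.array([1, 3, 5])) yields
# [0.1, 0.3, 0.3] -- sizes beyond len(values) fall back to the last entry.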
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
2377ec59602cc8f7f6606e2b2bc07593ae2982a3
|
7b6377050fba4d30f00e9fb5d56dfacb22d388e1
|
/xData/Documentation/keyword.py
|
047eedd677d3fb462e8a9cbd6adeadcbebc84fca
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
LLNL/fudge
|
0a4fe8e3a68b66d58e42d1f4d209ea3f713c6370
|
6ba80855ae47cb32c37f635d065b228fadb03412
|
refs/heads/master
| 2023-08-16T21:05:31.111098
| 2023-08-01T22:09:32
| 2023-08-01T22:09:32
| 203,678,373
| 21
| 4
|
NOASSERTION
| 2023-06-28T20:51:02
| 2019-08-21T23:22:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,266
|
py
|
# <<BEGIN-copyright>>
# Copyright 2022, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
"""
This module contains the GNDS documentation child nodes keywords and keywork classes.
"""
from LUPY import ancestry as ancestryModule
from .. import suite as suiteModule
from .. import text as textModule
class Keyword( textModule.Text ) :
"""A class representing a GNDS documentation/abstract node."""
moniker = 'keyword'
keyName = 'type'
def __init__( self, label, type, text ) :
textModule.Text.__init__( self, text, label = label )
self.__type = type
@property
def type( self ) :
return( self.__type )
def XML_extraAttributes( self, **kwargs ) :
if( self.__type == '' ) : return ''
return ' type="%s"' % self.__type
@classmethod
def parseNodeUsingClass(cls, node, xPath, linkData, **kwargs):
label = node.get( 'label' )
type = node.get( 'type' )
return cls(label, type, None)
class Keywords( suiteModule.Suite ) :
moniker = 'keywords'
suiteName = 'type'
def __init__( self ) :
suiteModule.Suite.__init__( self, [ Keyword ] )
|
[
"mattoon1@llnl.gov"
] |
mattoon1@llnl.gov
|
4fb633e66e24821fa918896ec72f310a2c7d7300
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/count-square-submatrices-with-all-ones.py
|
aedd724cf51f450b6e3698b4879fa2c9d95b2743
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 501
|
py
|
# Time: O(m * n)
# Space: O(1)
class Solution(object):
def countSquares(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
for i in xrange(1, len(matrix)):
for j in xrange(1, len(matrix[0])):
if not matrix[i][j]:
continue
l = min(matrix[i-1][j], matrix[i][j-1])
matrix[i][j] = l+1 if matrix[i-l][j-l] else l
return sum(x for row in matrix for x in row)
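# Editor's note: illustrative self-check added by the editor (Python 2 style to
# match the xrange usage above); the matrix is the standard example for this problem.
if __name__ == "__main__":
    example = [[0, 1, 1, 1],
               [1, 1, 1, 1],
               [0, 1, 1, 1]]
    # 10 squares of side 1, 4 of side 2 and 1 of side 3 -> 15 in total
    print(Solution().countSquares(example))  # expected: 15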
|
[
"noreply@github.com"
] |
kamyu104.noreply@github.com
|
9b66a08d83f538492518e8d3c941d747b9fd27df
|
b7851ffc689990a5c394697b1d016ba34307630c
|
/venv/lib/python3.8/site-packages/faker/providers/ssn/uk_UA/__init__.py
|
7673ee7bc606d47605ae2f9243e218267865b6d0
|
[] |
no_license
|
denokenya/django-schooling-rest-api
|
f38fb5cc31a6f40462f9cb1dcc6c3fd36e1301c6
|
552b98d5494344049541df615f446713cb5da1fa
|
refs/heads/main
| 2023-06-14T12:53:11.897887
| 2021-07-10T18:02:11
| 2021-07-10T18:02:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
from datetime import date
from .. import Provider as SsnProvider
class Provider(SsnProvider):
def ssn(self):
"""
Ukrainian "Реєстраційний номер облікової картки платника податків"
also known as "Ідентифікаційний номер фізичної особи".
"""
digits = []
# Number of days between 1899-12-31 and a birth date
for digit in str((self.generator.date_object() - date(1899, 12, 31)).days):
digits.append(int(digit))
# Person's sequence number
for _ in range(4):
digits.append(self.random_int(0, 9))
checksum = (
digits[0] * -1
+ digits[1] * 5
+ digits[2] * 7
+ digits[3] * 9
+ digits[4] * 4
+ digits[5] * 6
+ digits[6] * 10
+ digits[7] * 5
+ digits[8] * 7
)
# Remainder of a checksum divided by 11 or 1 if it equals to 10
digits.append(checksum % 11 % 10)
return "".join(str(digit) for digit in digits)
|
[
"lucasciccomy@gmail.com"
] |
lucasciccomy@gmail.com
|
2a5d6a8fa6c48f12df1474865332576ad3e7cadc
|
7882860350c714e6c08368288dab721288b8d9db
|
/구현/swea1954.py
|
ed62a9b2ebde3549be409ada6635d1d7bd8ba1fd
|
[] |
no_license
|
park-seonju/Algorithm
|
682fca984813a54b92a3f2ab174e4f05a95921a8
|
30e5bcb756e9388693624e8880e57bc92bfda969
|
refs/heads/master
| 2023-08-11T18:23:49.644259
| 2021-09-27T10:07:49
| 2021-09-27T10:07:49
| 388,741,922
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
delta = (
    (0, 1),   # column increases (move right)
    (1, 0),   # row increases (move down)
    (0, -1),  # column decreases (move left)
    (-1, 0),  # row decreases (move up)
)
T = int(input())
for test_case in range(1, T + 1):
N = int(input())
board = [[0] * N for _ in range(N)]
def snail():
row = 0
col = 0
num = 1
distance = max(N - 1,1)
while True:
for i in range(4):
for _ in range(distance):
                    # print(board)  # debug output; printing here floods stdout and breaks the expected output format
board[row][col] = num
num += 1
if num > N ** 2:
return
dr, dc = delta[i]
row += dr
col += dc
row += 1
col += 1
distance = max(1,distance-2)
snail()
print('#%d' % test_case)
for r in range(N):
for c in range(N):
print(board[r][c], end=' ')
print()
|
[
"cucu9823@naver.com"
] |
cucu9823@naver.com
|
51cdbcf4d68698ce397c18e4b7206e52ed374f3e
|
45c170fb0673deece06f3055979ece25c3210380
|
/toontown/speedchat/TTSCSellbotNerfMenu.py
|
bbd255756bc5a8d3dac7b3e4c5b633742d7a73a5
|
[] |
no_license
|
MTTPAM/PublicRelease
|
5a479f5f696cfe9f2d9dcd96f378b5ce160ec93f
|
825f562d5021c65d40115d64523bb850feff6a98
|
refs/heads/master
| 2021-07-24T09:48:32.607518
| 2018-11-13T03:17:53
| 2018-11-13T03:17:53
| 119,129,731
| 2
| 6
| null | 2018-11-07T22:10:10
| 2018-01-27T03:43:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,965
|
py
|
#Embedded file name: toontown.speedchat.TTSCSellbotNerfMenu
from toontown.toonbase import ToonPythonUtil as PythonUtil
from otp.speedchat.SCMenu import SCMenu
from otp.speedchat.SCMenuHolder import SCMenuHolder
from otp.speedchat.SCStaticTextTerminal import SCStaticTextTerminal
from otp.otpbase import OTPLocalizer
SellbotNerfMenu = [(OTPLocalizer.SellbotNerfMenuSections[0], [30150,
30151,
30152,
30153,
30154,
30155,
30156]), (OTPLocalizer.SellbotNerfMenuSections[1], [30157,
30158,
30159,
30160,
30161,
30162,
30163,
30164]), (OTPLocalizer.SellbotNerfMenuSections[2], [30165,
30166,
30167,
30168,
30169,
30170,
30171,
30172,
30173,
30174,
30175])]
class TTSCSellbotNerfMenu(SCMenu):
def __init__(self):
SCMenu.__init__(self)
self.__messagesChanged()
def destroy(self):
SCMenu.destroy(self)
def clearMenu(self):
SCMenu.clearMenu(self)
def __messagesChanged(self):
self.clearMenu()
try:
lt = base.localAvatar
except:
return
for section in SellbotNerfMenu:
if section[0] == -1:
for phrase in section[1]:
if phrase not in OTPLocalizer.SpeedChatStaticText:
print 'warning: tried to link Sellbot Nerf phrase %s which does not seem to exist' % phrase
break
self.append(SCStaticTextTerminal(phrase))
else:
menu = SCMenu()
for phrase in section[1]:
if phrase not in OTPLocalizer.SpeedChatStaticText:
print 'warning: tried to link Sellbot Nerf phrase %s which does not seem to exist' % phrase
break
menu.append(SCStaticTextTerminal(phrase))
menuName = str(section[0])
self.append(SCMenuHolder(menuName, menu))
|
[
"linktlh@gmail.com"
] |
linktlh@gmail.com
|
f0a517b00e54ed242fe81c1004b6aee54258f5a3
|
e7f67295e62fc5301ab23bce06c61f2311c2eeee
|
/mjml/core/registry.py
|
e2271ec00d336fba2502dc0692184cf39c26c0c4
|
[
"MIT"
] |
permissive
|
bayesimpact/mjml-stub
|
94d10588359990cd58d2085429b19a3777c51f15
|
30bab3f2e197d2f940f58439f2e8cd9fadb58d48
|
refs/heads/main
| 2023-05-08T11:54:19.313877
| 2021-01-25T21:30:48
| 2021-01-25T21:30:48
| 344,026,118
| 0
| 0
|
MIT
| 2021-03-03T06:31:49
| 2021-03-03T06:31:48
| null |
UTF-8
|
Python
| false
| false
| 828
|
py
|
__all__ = []
def _components():
from ..elements import (MjButton, MjText, MjSection, MjColumn, MjBody,
MjGroup, MjImage, MjDivider, MjTable, MjRaw)
from ..elements.head import (MjAttributes, MjFont, MjHead, MjPreview, MjStyle,
MjTitle)
components = {
'mj-button': MjButton,
'mj-text': MjText,
'mj-divider': MjDivider,
'mj-image': MjImage,
'mj-section': MjSection,
'mj-column': MjColumn,
'mj-body': MjBody,
'mj-group' : MjGroup,
'mj-table' : MjTable,
'mj-raw' : MjRaw,
# --- head components ---
'mj-attributes': MjAttributes,
'mj-font': MjFont,
'mj-head': MjHead,
'mj-preview': MjPreview,
'mj-title': MjTitle,
'mj-style': MjStyle,
}
return components
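# Editor's note (hypothetical usage sketch, not part of the original module):
# _components() builds the tag-name -> class registry lazily, so a renderer can
# dispatch on the MJML tag, e.g. _components()["mj-button"] returns MjButton.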
|
[
"felix.schwarz@oss.schwarz.eu"
] |
felix.schwarz@oss.schwarz.eu
|
6105554d84433e416576e543c9d8029e5b038601
|
f889bc01147869459c0a516382e7b95221295a7b
|
/test/test_body_61.py
|
83de2fb29e48fbb0b61eadc44a78bdfab46ca709
|
[] |
no_license
|
wildatheart/magento2-api-client
|
249a86f5c0289743f8df5b0324ccabd76f326512
|
e6a707f85b37c6c3e4ef3ff78507a7deb8f71427
|
refs/heads/master
| 2021-07-14T16:01:17.644472
| 2017-10-18T13:33:08
| 2017-10-18T13:33:08
| 107,412,121
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 871
|
py
|
# coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.body_61 import Body61
class TestBody61(unittest.TestCase):
""" Body61 unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testBody61(self):
"""
Test Body61
"""
# FIXME: construct object with mandatory attributes with example values
#model = swagger_client.models.body_61.Body61()
pass
if __name__ == '__main__':
unittest.main()
|
[
"sander@wildatheart.eu"
] |
sander@wildatheart.eu
|
f7754d30c7af009e5787a949e16e849192197347
|
fde8c89b352076f95cc16e589b1baf18f7befb51
|
/tempest/api/volume/admin/v2/test_snapshot_manage.py
|
111492428e46f21cf3942f3c59602e1e69b2eb0a
|
[] |
no_license
|
571451370/devstack_mitaka
|
b11145256deab817bcdf60a01a67bb6b2f9ddb52
|
1bdd3f2598f91c1446b85c5b6def7784a2f6ab02
|
refs/heads/master
| 2020-08-26T12:53:07.482514
| 2017-04-12T01:32:55
| 2017-04-12T01:32:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,876
|
py
|
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.volume import base
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
CONF = config.CONF
class SnapshotManageAdminV2Test(base.BaseVolumeAdminTest):
"""Unmanage & manage snapshots
This feature provides the ability to import/export volume snapshot
from one Cinder to another and to import snapshots that have not been
managed by Cinder from a storage back end to Cinder
"""
@decorators.idempotent_id('0132f42d-0147-4b45-8501-cc504bbf7810')
@testtools.skipUnless(CONF.volume_feature_enabled.manage_snapshot,
"Manage snapshot tests are disabled")
def test_unmanage_manage_snapshot(self):
# Create a volume
volume = self.create_volume()
# Create a snapshot
snapshot = self.create_snapshot(volume_id=volume['id'])
# Unmanage the snapshot
# Unmanage snapshot function works almost the same as delete snapshot,
# but it does not delete the snapshot data
self.admin_snapshots_client.unmanage_snapshot(snapshot['id'])
self.admin_snapshots_client.wait_for_resource_deletion(snapshot['id'])
# Fetch snapshot ids
snapshot_list = [
snap['id'] for snap in
self.snapshots_client.list_snapshots()['snapshots']
]
# Verify snapshot does not exist in snapshot list
self.assertNotIn(snapshot['id'], snapshot_list)
# Manage the snapshot
snapshot_ref = '_snapshot-%s' % snapshot['id']
new_snapshot = self.admin_snapshot_manage_client.manage_snapshot(
volume_id=volume['id'],
ref={'source-name': snapshot_ref})['snapshot']
self.addCleanup(self.delete_snapshot,
self.admin_snapshots_client, new_snapshot['id'])
# Wait for the snapshot to be available after manage operation
waiters.wait_for_snapshot_status(self.admin_snapshots_client,
new_snapshot['id'],
'available')
# Verify the managed snapshot has the expected parent volume
self.assertEqual(new_snapshot['volume_id'], volume['id'])
|
[
"tony.pig@gmail.com"
] |
tony.pig@gmail.com
|
856d2373425f060429aa63fde2ae8ce926777eec
|
888899f0cb3e6e7b28a9de39001a1fd1c177cd35
|
/COMPLETE PYTHON-3 COURSE/Chapter-03-IF_ELSE_FOR_WHILE_LOOP/infinite_loop.py
|
9e010471ab0611accc49974e87c73a684c37d8ed
|
[] |
no_license
|
VivakaNand/COMPLETE_PYTHON_3
|
ef162d71d3a44bf661fcc1a8aacce31e7953cd7c
|
b3b835afe7671fdc3d29d912650fd4ccd3bc83f6
|
refs/heads/master
| 2023-02-04T10:13:41.881939
| 2020-12-23T08:30:51
| 2020-12-23T08:30:51
| 323,839,528
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
# infinite loop
#i = 0
#while i<10:
# print(Hello World)
while True:
print("Hello World")
# to terminate infinite loop use command # Ctrl + c
|
[
"vivekjetani83@gmail.com"
] |
vivekjetani83@gmail.com
|
5fd0337e4783437ab44e5fcf862272a2e3c1070e
|
aa0270b351402e421631ebc8b51e528448302fab
|
/sdk/identity/azure-identity/azure/identity/_credentials/device_code.py
|
688c44c12f6b04cad5a40b0980d84b109f0cfcad
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
fangchen0601/azure-sdk-for-python
|
d04a22109d0ff8ff209c82e4154b7169b6cb2e53
|
c2e11d6682e368b2f062e714490d2de42e1fed36
|
refs/heads/master
| 2023-05-11T16:53:26.317418
| 2023-05-04T20:02:16
| 2023-05-04T20:02:16
| 300,440,803
| 0
| 0
|
MIT
| 2020-10-16T18:45:29
| 2020-10-01T22:27:56
| null |
UTF-8
|
Python
| false
| false
| 5,815
|
py
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from datetime import datetime
import time
from typing import Dict, Optional, Callable, Any
from azure.core.exceptions import ClientAuthenticationError
from .._constants import DEVELOPER_SIGN_ON_CLIENT_ID
from .._internal import InteractiveCredential, wrap_exceptions
class DeviceCodeCredential(InteractiveCredential):
"""Authenticates users through the device code flow.
When :func:`get_token` is called, this credential acquires a verification URL and code from Azure Active Directory.
A user must browse to the URL, enter the code, and authenticate with Azure Active Directory. If the user
authenticates successfully, the credential receives an access token.
This credential is primarily useful for authenticating a user in an environment without a web browser, such as an
SSH session. If a web browser is available, :class:`~azure.identity.InteractiveBrowserCredential` is more
convenient because it automatically opens a browser to the login page.
:param str client_id: client ID of the application users will authenticate to. When not specified users will
authenticate to an Azure development application.
:keyword str authority: Authority of an Azure Active Directory endpoint, for example "login.microsoftonline.com",
the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts`
defines authorities for other clouds.
:keyword str tenant_id: an Azure Active Directory tenant ID. Defaults to the "organizations" tenant, which can
authenticate work or school accounts. **Required for single-tenant applications.**
:keyword int timeout: seconds to wait for the user to authenticate. Defaults to the validity period of the
device code as set by Azure Active Directory, which also prevails when **timeout** is longer.
:keyword prompt_callback: A callback enabling control of how authentication
instructions are presented. Must accept arguments (``verification_uri``, ``user_code``, ``expires_on``):
- ``verification_uri`` (str) the URL the user must visit
- ``user_code`` (str) the code the user must enter there
- ``expires_on`` (datetime.datetime) the UTC time at which the code will expire
If this argument isn't provided, the credential will print instructions to stdout.
:paramtype prompt_callback: Callable[str, str, ~datetime.datetime]
:keyword AuthenticationRecord authentication_record: :class:`AuthenticationRecord` returned by :func:`authenticate`
:keyword bool disable_automatic_authentication: if True, :func:`get_token` will raise
:class:`AuthenticationRequiredError` when user interaction is required to acquire a token. Defaults to False.
:keyword cache_persistence_options: configuration for persistent token caching. If unspecified, the credential
will cache tokens in memory.
:paramtype cache_persistence_options: ~azure.identity.TokenCachePersistenceOptions
:keyword bool disable_authority_validation_and_instance_discovery: Determines whether or not instance discovery
is performed when attempting to authenticate. Setting this to true will completely disable instance discovery
and authority validation.
.. admonition:: Example:
.. literalinclude:: ../samples/credential_creation_code_snippets.py
:start-after: [START create_device_code_credential]
:end-before: [END create_device_code_credential]
:language: python
:dedent: 4
:caption: Create a DeviceCodeCredential.
"""
def __init__(
self,
client_id: str = DEVELOPER_SIGN_ON_CLIENT_ID,
*,
timeout: Optional[int] = None,
prompt_callback: Optional[Callable[[str, str, datetime], None]] = None,
**kwargs: Any
) -> None:
self._timeout = timeout
self._prompt_callback = prompt_callback
super(DeviceCodeCredential, self).__init__(client_id=client_id, **kwargs)
@wrap_exceptions
def _request_token(self, *scopes: str, **kwargs: Any) -> Dict:
# MSAL requires scopes be a list
scopes = list(scopes) # type: ignore
app = self._get_app(**kwargs)
flow = app.initiate_device_flow(scopes)
if "error" in flow:
raise ClientAuthenticationError(
message="Couldn't begin authentication: {}".format(flow.get("error_description") or flow.get("error"))
)
if self._prompt_callback:
self._prompt_callback(
flow["verification_uri"], flow["user_code"], datetime.utcfromtimestamp(flow["expires_at"])
)
else:
print(flow["message"])
if self._timeout is not None and self._timeout < flow["expires_in"]:
# user specified an effective timeout we will observe
deadline = int(time.time()) + self._timeout
result = app.acquire_token_by_device_flow(
flow, exit_condition=lambda flow: time.time() > deadline, claims_challenge=kwargs.get("claims")
)
else:
# MSAL will stop polling when the device code expires
result = app.acquire_token_by_device_flow(flow, claims_challenge=kwargs.get("claims"))
# raise for a timeout here because the error is particular to this class
if "access_token" not in result and result.get("error") == "authorization_pending":
raise ClientAuthenticationError(message="Timed out waiting for user to authenticate")
# base class will raise for other errors
return result
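# Editor's note (hypothetical usage sketch, not part of this module):
# from azure.identity import DeviceCodeCredential
# credential = DeviceCodeCredential(tenant_id="<tenant-id>")
# token = credential.get_token("https://management.azure.com/.default")
# get_token() prints the verification URL and user code (or calls the supplied
# prompt_callback) and blocks until the user signs in or the code/timeout expires.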
|
[
"noreply@github.com"
] |
fangchen0601.noreply@github.com
|
35dbc8417982370fd70de25f449e08b98d2b5632
|
0ba2c3776618b5b8b76f4a23f21e9c6ad3f6e2e1
|
/afterclass/homework3/6.1.py
|
fc73fce3ae1c98fb5f0ed793b9585b6b9b1ba543
|
[] |
no_license
|
WangDongDong1234/python_code
|
6dc5ce8210b1dcad7d57320c9e1946fd4b3fe302
|
6a785306a92d328a0d1427446ca773a9803d4cc0
|
refs/heads/master
| 2020-04-15T12:35:03.427589
| 2019-09-16T15:38:25
| 2019-09-16T15:38:25
| 164,681,323
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
while 1:
try:
str=input()
array=[int(i) for i in str.strip().split(" ")]
list=array[1::]
result=[]
for i in range(array[0]):
result.append(0)
for i in range(array[0]):
for j in range(array[0]):
if list[i]>list[j]:
result[i]+=1
print_result=[]
for i in range(0,array[0]):
for j in range(0,len(result)):
if i==result[j]:
print_result.append(list[j])
for i in range(0,len(print_result)):
if i==0:
print(print_result[0],end="")
else:
print("",print_result[i],end="")
print()
except EOFError:
break
|
[
"827495316@qq.com"
] |
827495316@qq.com
|
6908c575ccde591afe9caf7d459b020ca4b7f5b9
|
56ca0c81e6f8f984737f57c43ad8d44a84f0e6cf
|
/src/api_v1/serializers/openaccess.py
|
aada052fbc6179eb4bd84c660136bfb111f92683
|
[
"MIT"
] |
permissive
|
iplweb/bpp
|
c40f64c78c0da9f21c1bd5cf35d56274a491f840
|
a3d36a8d76733a479e6b580ba6ea57034574e14a
|
refs/heads/dev
| 2023-08-09T22:10:49.509079
| 2023-07-25T04:55:54
| 2023-07-25T04:55:54
| 87,017,024
| 2
| 0
|
NOASSERTION
| 2023-03-04T04:02:36
| 2017-04-02T21:22:20
|
Python
|
UTF-8
|
Python
| false
| false
| 288
|
py
|
from rest_framework import serializers
from bpp.models import Czas_Udostepnienia_OpenAccess
class Czas_Udostepnienia_OpenAccess_Serializer(serializers.HyperlinkedModelSerializer,):
class Meta:
model = Czas_Udostepnienia_OpenAccess
fields = ["id", "nazwa", "skrot"]
|
[
"michal.dtz@gmail.com"
] |
michal.dtz@gmail.com
|
5a8427de6fdcde4bce96df3cfac1ad3cec39ace3
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startPyquil1203.py
|
fafeb57574e59bf7a05a7e9fb1487768b3914af1
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,220
|
py
|
# qubit number=5
# total number=51
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=3
prog += H(1) # number=4
prog += H(2) # number=5
prog += H(3) # number=6
prog += H(4) # number=21
prog += H(0) # number=1
prog += H(1) # number=2
prog += H(2) # number=7
prog += H(3) # number=8
prog += H(0) # number=31
prog += CZ(1,0) # number=32
prog += H(0) # number=33
prog += H(1) # number=44
prog += CZ(0,1) # number=45
prog += H(1) # number=46
prog += X(1) # number=41
prog += H(1) # number=48
prog += CZ(0,1) # number=49
prog += H(1) # number=50
prog += X(0) # number=26
prog += CNOT(1,0) # number=27
prog += H(1) # number=37
prog += CZ(0,1) # number=38
prog += H(1) # number=39
prog += X(1) # number=35
prog += CNOT(0,1) # number=36
prog += X(2) # number=11
prog += X(3) # number=12
prog += CNOT(3,2) # number=43
prog += CNOT(3,2) # number=47
prog += X(0) # number=13
prog += CNOT(0,1) # number=22
prog += X(1) # number=23
prog += CNOT(0,1) # number=24
prog += X(2) # number=15
prog += X(1) # number=29
prog += Y(4) # number=28
prog += X(3) # number=16
prog += H(0) # number=17
prog += H(1) # number=18
prog += H(2) # number=19
prog += H(3) # number=20
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('5q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil1203.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
11feff481cc0b103cd744e5ddaa43c27c9b9557e
|
a5698f82064aade6af0f1da21f504a9ef8c9ac6e
|
/huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/list_auditlogs_request.py
|
a1697211afe96dd24be2581077e2977269f456d9
|
[
"Apache-2.0"
] |
permissive
|
qizhidong/huaweicloud-sdk-python-v3
|
82a2046fbb7d62810984399abb2ca72b3b47fac6
|
6cdcf1da8b098427e58fc3335a387c14df7776d0
|
refs/heads/master
| 2023-04-06T02:58:15.175373
| 2021-03-30T10:47:29
| 2021-03-30T10:47:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,513
|
py
|
# coding: utf-8
import pprint
import re
import six
class ListAuditlogsRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_language': 'str',
'instance_id': 'str',
'node_id': 'str',
'start_time': 'str',
'end_time': 'str',
'offset': 'int',
'limit': 'int'
}
attribute_map = {
'x_language': 'X-Language',
'instance_id': 'instance_id',
'node_id': 'node_id',
'start_time': 'start_time',
'end_time': 'end_time',
'offset': 'offset',
'limit': 'limit'
}
def __init__(self, x_language=None, instance_id=None, node_id=None, start_time=None, end_time=None, offset=None, limit=None):
"""ListAuditlogsRequest - a model defined in huaweicloud sdk"""
self._x_language = None
self._instance_id = None
self._node_id = None
self._start_time = None
self._end_time = None
self._offset = None
self._limit = None
self.discriminator = None
if x_language is not None:
self.x_language = x_language
self.instance_id = instance_id
if node_id is not None:
self.node_id = node_id
self.start_time = start_time
self.end_time = end_time
if offset is not None:
self.offset = offset
if limit is not None:
self.limit = limit
@property
def x_language(self):
"""Gets the x_language of this ListAuditlogsRequest.
:return: The x_language of this ListAuditlogsRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this ListAuditlogsRequest.
:param x_language: The x_language of this ListAuditlogsRequest.
:type: str
"""
self._x_language = x_language
@property
def instance_id(self):
"""Gets the instance_id of this ListAuditlogsRequest.
:return: The instance_id of this ListAuditlogsRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ListAuditlogsRequest.
:param instance_id: The instance_id of this ListAuditlogsRequest.
:type: str
"""
self._instance_id = instance_id
@property
def node_id(self):
"""Gets the node_id of this ListAuditlogsRequest.
:return: The node_id of this ListAuditlogsRequest.
:rtype: str
"""
return self._node_id
@node_id.setter
def node_id(self, node_id):
"""Sets the node_id of this ListAuditlogsRequest.
:param node_id: The node_id of this ListAuditlogsRequest.
:type: str
"""
self._node_id = node_id
@property
def start_time(self):
"""Gets the start_time of this ListAuditlogsRequest.
:return: The start_time of this ListAuditlogsRequest.
:rtype: str
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this ListAuditlogsRequest.
:param start_time: The start_time of this ListAuditlogsRequest.
:type: str
"""
self._start_time = start_time
@property
def end_time(self):
"""Gets the end_time of this ListAuditlogsRequest.
:return: The end_time of this ListAuditlogsRequest.
:rtype: str
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this ListAuditlogsRequest.
:param end_time: The end_time of this ListAuditlogsRequest.
:type: str
"""
self._end_time = end_time
@property
def offset(self):
"""Gets the offset of this ListAuditlogsRequest.
:return: The offset of this ListAuditlogsRequest.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListAuditlogsRequest.
:param offset: The offset of this ListAuditlogsRequest.
:type: int
"""
self._offset = offset
@property
def limit(self):
"""Gets the limit of this ListAuditlogsRequest.
:return: The limit of this ListAuditlogsRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListAuditlogsRequest.
:param limit: The limit of this ListAuditlogsRequest.
:type: int
"""
self._limit = limit
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListAuditlogsRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
229367ad40fc730f2e52dd2f23c9e2967bc956e6
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/b8wRDMWgMZTN2nmfx_12.py
|
d816d604f90e8b6c23df10170ce6f72b2ee90835
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
def equal(a, b, c):
eq = 0
if a == b and a == c and b == c:
eq = 3
elif a == b or a == c or b == c:
eq = 2
return eq
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
9af273cac076fed3d2bb3147957deeebc7485900
|
c6dc8b682aea706b18b05952f791e01989db3669
|
/Programiz/checkStringIsPalindrome.py
|
4d9854af2259cf67461730ef2b277c103a7f238e
|
[] |
no_license
|
LizaPersonal/personal_exercises
|
aeb9ceb2593a6d5ee1a8e9f7c0862ce638acd29b
|
649dc0c116861995fbf58b4736a0c66fd75d648c
|
refs/heads/master
| 2021-04-03T02:17:51.850676
| 2018-07-31T21:10:59
| 2018-07-31T21:10:59
| 125,123,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
# Program to check if a string is palindrome or not
# change this value for a different output
my_str = 'aIbohPhoBiA'
# make it suitable for caseless comparison
my_str = my_str.casefold()
# reverse the string
rev_str = reversed(my_str)
# check if the string is equal to its reverse
if list(my_str) == list(rev_str):
print("It is palindrome")
else:
print("It is not palindrome")
|
[
"liza@rocketrip.com"
] |
liza@rocketrip.com
|
4a8b858b417a2c65f955b1ce76dfdf44fba59bea
|
8629f82f971f4e036c2b6358fe353a2c88bfd098
|
/BConverters/AnnotationConverters.py
|
76e538d13ae67693278507c13db362dc14a696cc
|
[
"MIT"
] |
permissive
|
mahajrod/MAVR
|
92828fa1c191b5f8ed08f1ba33f1684df09742cd
|
8c57ff5519f130357e36e6f12868bc997e52a8a7
|
refs/heads/master
| 2023-08-25T01:02:24.738724
| 2023-08-22T15:13:39
| 2023-08-22T15:13:39
| 21,181,911
| 11
| 6
| null | 2017-09-18T20:25:16
| 2014-06-24T21:45:57
|
Python
|
UTF-8
|
Python
| false
| false
| 635
|
py
|
__author__ = 'mahajrod'
import os
from BCBio import GFF
class AnnotationConverters:
"
@staticmethod
def gff22gff3(input_file, output_file, target_lines=100000):
in_fd = open(input_file, "r")
out_fd = open(output_file, "w")
GFF.write(GFF.parse(in_fd, target_lines=target_lines), out_fd)
in_fd.close()
out_fd.close()
"
@staticmethod
def gff32gtf(input_file, output_file):
os.system("gffread %s -T -o %s" % (input_file, output_file))
@staticmethod
def gtf2gff3(input_file, output_file):
os.system("gffread %s -o %s" % (input_file, output_file))
|
[
"mahajrod@gmail.com"
] |
mahajrod@gmail.com
|
a510a15dfe373e469b66e9435f40c354e16b56cf
|
77311ad9622a7d8b88707d7cee3f44de7c8860cb
|
/res_bw/scripts/common/lib/encodings/cp1026.py
|
c4de9dd6738fbafdea4d05746feed1a2dabe7703
|
[] |
no_license
|
webiumsk/WOT-0.9.14-CT
|
9b193191505a4560df4e872e022eebf59308057e
|
cfe0b03e511d02c36ce185f308eb48f13ecc05ca
|
refs/heads/master
| 2021-01-10T02:14:10.830715
| 2016-02-14T11:59:59
| 2016-02-14T11:59:59
| 51,606,676
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 2,269
|
py
|
# 2016.02.14 12:48:01 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/encodings/cp1026.py
""" Python Character Mapping Codec cp1026 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP1026.TXT' with gencodec.py.
"""
import codecs
class Codec(codecs.Codec):
def encode(self, input, errors = 'strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors = 'strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final = False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final = False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(name='cp1026', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
decoding_table = u'\x00\x01\x02\x03\x9c\t\x86\x7f\x97\x8d\x8e\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x9d\x85\x08\x87\x18\x19\x92\x8f\x1c\x1d\x1e\x1f\x80\x81\x82\x83\x84\n\x17\x1b\x88\x89\x8a\x8b\x8c\x05\x06\x07\x90\x91\x16\x93\x94\x95\x96\x04\x98\x99\x9a\x9b\x14\x15\x9e\x1a \xa0\xe2\xe4\xe0\xe1\xe3\xe5{\xf1\xc7.<(+!&\xe9\xea\xeb\xe8\xed\xee\xef\xec\xdf\u011e\u0130*);^-/\xc2\xc4\xc0\xc1\xc3\xc5[\xd1\u015f,%_>?\xf8\xc9\xca\xcb\xc8\xcd\xce\xcf\xcc\u0131:\xd6\u015e\'=\xdc\xd8abcdefghi\xab\xbb}`\xa6\xb1\xb0jklmnopqr\xaa\xba\xe6\xb8\xc6\xa4\xb5\xf6stuvwxyz\xa1\xbf]$@\xae\xa2\xa3\xa5\xb7\xa9\xa7\xb6\xbc\xbd\xbe\xac|\xaf\xa8\xb4\xd7\xe7ABCDEFGHI\xad\xf4~\xf2\xf3\xf5\u011fJKLMNOPQR\xb9\xfb\\\xf9\xfa\xff\xfc\xf7STUVWXYZ\xb2\xd4#\xd2\xd3\xd50123456789\xb3\xdb"\xd9\xda\x9f'
encoding_table = codecs.charmap_build(decoding_table)
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\encodings\cp1026.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:48:01 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
1fe21bfb31d89042f71c3414c4e127daaeb2dadb
|
eb61d62ca1f6f0123e3771105f5dfbbd6115138d
|
/.history/leccion_20210910224916.py
|
890f138fa4238e95326e91bb5a1829aaf353de86
|
[] |
no_license
|
Alopezm5/CORRECTO-2
|
e0f14bcc3a88c0e222d10e3261e68532008bc42e
|
223613f1fb04dce3fac9f82f243cb2f22fe100f3
|
refs/heads/main
| 2023-07-29T06:52:48.147424
| 2021-09-12T20:33:27
| 2021-09-12T20:33:27
| 388,995,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
from datetime import date
class Calculos:
def antiguedad(self,fecha):
hoy=date.today()
if hoy<fecha:
return -1
else:
anio=fecha.year
mes=fecha.month
dia=fecha.day
            aa = 0
            while fecha <= hoy:
                aa += 1
                fecha = date(anio + aa, mes, dia)
            return aa - 1
cal = Calculos()
print(cal.antiguedad(date(1971, 6, 9)))
|
[
"85761855+Alopezm5@users.noreply.github.com"
] |
85761855+Alopezm5@users.noreply.github.com
|
aa3ae29b2d0ea43a8ec9e447b892f19baf331b19
|
258b656d1b6864726864f89d4c8dc38fc633a48f
|
/odoo_addons_customization/lending_import_kairos/models/lending_kairos_line.py
|
319e3707c1efd5689559ff6c6ea912bea29179ab
|
[] |
no_license
|
test-odoorosario/opt
|
c17e1c1767710ca8e13a799644fb85b07e83639b
|
77921b4d965f2e4c081d523b373eb306a450a873
|
refs/heads/master
| 2022-12-02T04:36:04.685119
| 2019-07-11T17:17:20
| 2019-07-11T17:17:20
| 196,436,293
| 0
| 1
| null | 2022-11-22T00:30:40
| 2019-07-11T17:13:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,130
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class LendingKairosLine(models.Model):
_name = 'lending.kairos.line'
lending_id = fields.Many2one(
comodel_name='lending',
string='Medicamento'
)
code = fields.Char(
related='lending_id.code',
string='Codigo'
)
name = fields.Char(
related='lending_id.name',
string='Descripcion'
)
description_drug = fields.Char(
related='lending_id.description_drug',
string='Principio activo'
)
description_laboratory = fields.Char(
related='lending_id.description_laboratory',
string='Laboratorio'
)
description_presentation = fields.Char(
related='lending_id.description_presentation',
string='Gramaje y presentación'
)
description_product = fields.Char(
related='lending_id.description_product',
string='Marca comercial'
)
value = fields.Float(
string='Valor',
digits=(12, 6)
)
date = fields.Date(
string='Fecha de vigencia'
)
value_line_ids = fields.One2many(
'lending.kairos.value.line',
'kairos_id',
string="Valores"
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"root@ip-172-31-8-107.sa-east-1.compute.internal"
] |
root@ip-172-31-8-107.sa-east-1.compute.internal
|
f90310a5c43477d44f9d152349a9a72b164b204a
|
2ed86a79d0fcd299ad4a01310954c5eddcf01edf
|
/homeassistant/components/ws66i/media_player.py
|
1101c0c9fbc8c9e1a5adc29af5766489e54fae72
|
[
"Apache-2.0"
] |
permissive
|
konnected-io/home-assistant
|
037f12c87bb79e19220192eb918e49db1b1a8b3e
|
2e65b77b2b5c17919939481f327963abdfdc53f0
|
refs/heads/dev
| 2023-05-11T08:57:41.891518
| 2023-05-07T20:03:37
| 2023-05-07T20:03:37
| 109,931,626
| 24
| 10
|
Apache-2.0
| 2023-02-22T06:24:01
| 2017-11-08T05:27:21
|
Python
|
UTF-8
|
Python
| false
| false
| 6,345
|
py
|
"""Support for interfacing with WS66i 6 zone home audio controller."""
from pyws66i import WS66i, ZoneStatus
from homeassistant.components.media_player import (
MediaPlayerEntity,
MediaPlayerEntityFeature,
MediaPlayerState,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN, MAX_VOL
from .coordinator import Ws66iDataUpdateCoordinator
from .models import Ws66iData
PARALLEL_UPDATES = 1
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the WS66i 6-zone amplifier platform from a config entry."""
ws66i_data: Ws66iData = hass.data[DOMAIN][config_entry.entry_id]
# Build and add the entities from the data class
async_add_entities(
Ws66iZone(
device=ws66i_data.device,
ws66i_data=ws66i_data,
entry_id=config_entry.entry_id,
zone_id=zone_id,
data_idx=idx,
coordinator=ws66i_data.coordinator,
)
for idx, zone_id in enumerate(ws66i_data.zones)
)
class Ws66iZone(CoordinatorEntity[Ws66iDataUpdateCoordinator], MediaPlayerEntity):
"""Representation of a WS66i amplifier zone."""
def __init__(
self,
device: WS66i,
ws66i_data: Ws66iData,
entry_id: str,
zone_id: int,
data_idx: int,
coordinator: Ws66iDataUpdateCoordinator,
) -> None:
"""Initialize a zone entity."""
super().__init__(coordinator)
self._ws66i: WS66i = device
self._ws66i_data: Ws66iData = ws66i_data
self._zone_id: int = zone_id
self._zone_id_idx: int = data_idx
self._status: ZoneStatus = coordinator.data[data_idx]
self._attr_source_list = ws66i_data.sources.name_list
self._attr_unique_id = f"{entry_id}_{self._zone_id}"
self._attr_name = f"Zone {self._zone_id}"
self._attr_supported_features = (
MediaPlayerEntityFeature.VOLUME_MUTE
| MediaPlayerEntityFeature.VOLUME_SET
| MediaPlayerEntityFeature.VOLUME_STEP
| MediaPlayerEntityFeature.TURN_ON
| MediaPlayerEntityFeature.TURN_OFF
| MediaPlayerEntityFeature.SELECT_SOURCE
)
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, str(self.unique_id))},
name=self.name,
manufacturer="Soundavo",
model="WS66i 6-Zone Amplifier",
)
self._set_attrs_from_status()
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
# This will be called for each of the entities after the coordinator
# finishes executing _async_update_data()
# Save a reference to the zone status that this entity represents
self._status = self.coordinator.data[self._zone_id_idx]
self._set_attrs_from_status()
# Parent will notify HA of the update
super()._handle_coordinator_update()
@callback
def _set_attrs_from_status(self) -> None:
status = self._status
sources = self._ws66i_data.sources.id_name
self._attr_state = MediaPlayerState.ON if status.power else MediaPlayerState.OFF
self._attr_volume_level = status.volume / float(MAX_VOL)
self._attr_is_volume_muted = status.mute
self._attr_source = self._attr_media_title = sources[status.source]
@callback
def _async_update_attrs_write_ha_state(self) -> None:
self._set_attrs_from_status()
self.async_write_ha_state()
async def async_select_source(self, source: str) -> None:
"""Set input source."""
idx = self._ws66i_data.sources.name_id[source]
await self.hass.async_add_executor_job(
self._ws66i.set_source, self._zone_id, idx
)
self._status.source = idx
self._async_update_attrs_write_ha_state()
async def async_turn_on(self) -> None:
"""Turn the media player on."""
await self.hass.async_add_executor_job(
self._ws66i.set_power, self._zone_id, True
)
self._status.power = True
self._async_update_attrs_write_ha_state()
async def async_turn_off(self) -> None:
"""Turn the media player off."""
await self.hass.async_add_executor_job(
self._ws66i.set_power, self._zone_id, False
)
self._status.power = False
self._async_update_attrs_write_ha_state()
async def async_mute_volume(self, mute: bool) -> None:
"""Mute (true) or unmute (false) media player."""
await self.hass.async_add_executor_job(
self._ws66i.set_mute, self._zone_id, mute
)
self._status.mute = bool(mute)
self._async_update_attrs_write_ha_state()
async def async_set_volume_level(self, volume: float) -> None:
"""Set volume level, range 0..1."""
await self.hass.async_add_executor_job(self._set_volume, int(volume * MAX_VOL))
self._async_update_attrs_write_ha_state()
async def async_volume_up(self) -> None:
"""Volume up the media player."""
await self.hass.async_add_executor_job(
self._set_volume, min(self._status.volume + 1, MAX_VOL)
)
self._async_update_attrs_write_ha_state()
async def async_volume_down(self) -> None:
"""Volume down media player."""
await self.hass.async_add_executor_job(
self._set_volume, max(self._status.volume - 1, 0)
)
self._async_update_attrs_write_ha_state()
def _set_volume(self, volume: int) -> None:
"""Set the volume of the media player."""
# Can't set a new volume level when this zone is muted.
# Follow behavior of keypads, where zone is unmuted when volume changes.
if self._status.mute:
self._ws66i.set_mute(self._zone_id, False)
self._status.mute = False
self._ws66i.set_volume(self._zone_id, volume)
self._status.volume = volume
|
[
"noreply@github.com"
] |
konnected-io.noreply@github.com
|
f07cc36cc24fb93bc7abf3c6797c8a06336d73ce
|
61f6a7b3226e892d897538180a41b9d65f3be9ef
|
/run.py
|
ef09f7438d369749a1b07c293e1931da000c9400
|
[
"MIT"
] |
permissive
|
tangbiondi/aantonop-subtitles
|
db7952dc6dd340eabb53b85813493f34ca6e45bd
|
35120f247cee63b4f1c0f937d5c337374ef51ec3
|
refs/heads/master
| 2021-04-28T01:32:54.412736
| 2018-02-20T13:45:31
| 2018-02-20T13:45:31
| 122,281,228
| 0
| 0
|
MIT
| 2018-02-21T01:55:35
| 2018-02-21T01:55:34
| null |
UTF-8
|
Python
| false
| false
| 1,875
|
py
|
import re
from collections import defaultdict
from glob import glob
languages = set()
videos = {}
for filepath in glob("./subtitles/original/*"):
filename = filepath.replace("./subtitles/original/", "")
title, youtube_id, lang = re.match(r"(.*)-(.{11,13})\.(.*)\.vtt", filename).groups()
languages.add(lang)
    if youtube_id not in videos:
videos[youtube_id] = {
"title": title,
"subtitles": [{"lang": lang, "filepath": filepath}]
}
else:
        videos[youtube_id]["subtitles"].append({"lang": lang, "filepath": filepath})
print(filename)
headers = ["No.", "Title"] + list(languages)
langs = list(languages)
print("|", end="")
for header in ["No.", "Title"] + langs:
print(" <sup><sub>{}</sub></sup> |".format(header), end="")
print("")
print("|", end="")
for i in range(len(headers)):
print("----|", end="")
print("")
def multiline_split(str, char_per_line):
words = str.split(" ")
result = ""
line = ""
for word in words:
if len(line + word) < char_per_line:
line += " " + word
else:
result += line + "<br>"
line = word
result += line
return result
lang_stat = defaultdict(int)
for i, (youtube_id, video) in enumerate(videos.items()):
print("| <sup><sub>{}</sub></sup> |".format(i+1), end="", flush=True)
print(" <sup><sub>[{title}]({youtube_link})</sub></sup> |".format(
title=multiline_split(video["title"], 25),
youtube_link="https://www.youtube.com/watch?v={}".format(youtube_id)
), end="", flush=True)
for lang in langs:
if lang in [sub["lang"] for sub in video["subtitles"]]:
print(" <sup><sub>✓</sub></sup> |", end="", flush=True)
lang_stat[lang] += 1
else:
print(" |", end="", flush=True)
print("")
print("")
|
[
"noisy.pl@gmail.com"
] |
noisy.pl@gmail.com
|
88f23fd76116a7637df84116fa5bd6222cf5318a
|
f7dd190a665a4966db33dcc1cc461dd060ca5946
|
/apps/posts/migrations/0001_initial.py
|
97494a6fbf0f67308adabcc659abd18daaae77c0
|
[] |
no_license
|
Darwin939/macmeharder_back
|
2cc35e2e8b39a82c8ce201e63d9f6a9954a04463
|
8fc078333a746ac7f65497e155c58415252b2d33
|
refs/heads/main
| 2023-02-28T12:01:23.237320
| 2021-02-02T17:37:33
| 2021-02-02T17:37:33
| 328,173,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 918
|
py
|
# Generated by Django 3.1.5 on 2021-01-16 09:30
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('title', models.CharField(blank=True, default='', max_length=300)),
('mini_title', models.CharField(blank=True, max_length=300, null=True)),
('body', models.TextField(blank=True, max_length=10000, null=True)),
],
options={
'verbose_name': 'post',
'verbose_name_plural': 'post',
'ordering': ['created'],
},
),
]
|
[
"51247000+Darwin939@users.noreply.github.com"
] |
51247000+Darwin939@users.noreply.github.com
|
8aac422067e1cd8db4e302ec4ff97aacdc5f7736
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02708/s543945314.py
|
6e7a98625c4186a02cb68a2ca3bd2c3ee37b3d97
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
#!/usr/bin/env python3
def main():
N, K = map(int, input().split())
mod = 7 + 10 ** 9
res = 0
    # Sums of k distinct values chosen from 0..N range from k*(k-1)/2 up to
    # k*(2N-k+1)/2, so the count of reachable sums is the difference plus 1.
    for k in range(K, N + 2):
        res += ((k * (2 * N - k + 1) // 2) - (k * (k - 1) // 2) + 1)
res %= mod
print(int(res))
if __name__ == '__main__':
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
fc6c6f943c0e49189d8ac9a2b455467b12652b7d
|
6b9084d234c87d7597f97ec95808e13f599bf9a1
|
/models/operator/multiscale_deformable_attention/_get_torch_build_conf.py
|
06290f7aabf171664429380306b945bfc02fd19c
|
[] |
no_license
|
LitingLin/ubiquitous-happiness
|
4b46234ce0cb29c4d27b00ec5a60d3eeb52c26fc
|
aae2d764e136ca4a36c054212b361dd7e8b22cba
|
refs/heads/main
| 2023-07-13T19:51:32.227633
| 2021-08-03T16:02:03
| 2021-08-03T16:02:03
| 316,664,903
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 953
|
py
|
import torch
from torch.utils.cpp_extension import include_paths, library_paths, CUDAExtension, COMMON_NVCC_FLAGS
def _get_torch_cuda_flags():
return COMMON_NVCC_FLAGS
def _get_torch_cuda_archs():
config = torch.__config__.show()
configs = config.split('\n')
archs = set()
for conf in configs:
if 'NVCC' in conf and 'arch' in conf:
ss = conf.split(';')
for s in ss:
s = s.strip()
if s.startswith('arch='):
cs = s[5:].split(',')
for c in cs:
v = c.split('_')
archs.add(int(v[1]))
return archs
def _get_torch_include_paths():
return [path.replace('\\', '/') for path in include_paths(False)]
def _get_torch_library_paths():
return [path.replace('\\', '/') for path in library_paths(False)]
def _get_torch_libraries():
return CUDAExtension('', []).libraries
|
[
"linliting06@live.com"
] |
linliting06@live.com
|
54af20a4223a7fce77f976c6063056318656c59a
|
0fa98dbc4d6256121b9f478a13ff2254047fb543
|
/12_01_typical_interview_tasks/L. Extra letter.py
|
3044323e3d49139edab4073c9b2c1641c929ee7f
|
[] |
no_license
|
vamotest/yandex_algorithms
|
48d5b29cb6e2789ea8f7e8024c798851058f1d4c
|
a588da3d21ff95e2437818493769719600f3eaf7
|
refs/heads/master
| 2023-03-19T20:44:59.373046
| 2021-01-20T19:06:28
| 2021-01-20T19:06:28
| 330,421,669
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
from itertools import zip_longest
def define_anagrams(first, second):
li = list(zip_longest(first, second))
for letter in li:
if letter[0] != letter[1]:
return letter[1]
if __name__ == '__main__':
first_word = ''.join(sorted(list(str(input()))))
second_word = ''.join(sorted(list(str(input()))))
result = define_anagrams(first_word, second_word)
print(result)
|
[
"vamotest@gmail.com"
] |
vamotest@gmail.com
|
16953af903b8aa207c099df1b969dace225c4224
|
40c3c3ad98e5d5b10af1cdaa5b5d2278472448a5
|
/tests/app/tests/test_content_panes.py
|
20881cc868cfd0f4365af6ba10b42a04acede327
|
[] |
no_license
|
modohash/django-hstore-flattenfields
|
ac96a509dde799625c01cff078c830b48d479f9d
|
09626a638b9ef85d28fa5bfef1b040f9926bb95b
|
refs/heads/master
| 2021-01-18T16:08:59.850946
| 2015-05-04T23:19:00
| 2015-05-04T23:19:00
| 54,548,872
| 0
| 0
| null | 2016-03-23T09:54:54
| 2016-03-23T09:54:53
| null |
UTF-8
|
Python
| false
| false
| 3,645
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from django.core.exceptions import ValidationError
from datetime import date, datetime
from hstore_flattenfields.models import ContentPane, DynamicField
from tests.app.models import AuthorType, Author
from hstore_flattenfields.utils import get_ctype
class AuthorContentPaneTests(TestCase):
def setUp(self):
self.commics_authors = AuthorType.objects.create(
id=1, name="Something Group", slug="commics_authors"
)
self.dramatic_authors = AuthorType.objects.create(
id=2, name="Other Group", slug="dramatic_authors"
)
self.main_info_pane = ContentPane.objects.create(
id=1,
name='Main Info', content_type=get_ctype(Author),
)
self.commic_pane = ContentPane.objects.create(
id=2, name='Commic Information Pane',
content_type=get_ctype(Author),
group=self.commics_authors.dynamicfieldgroup_ptr
)
self.dramatic_pane = ContentPane.objects.create(
id=3, name='Drama Information Pane',
content_type=get_ctype(Author),
group=self.dramatic_authors.dynamicfieldgroup_ptr
)
self.age = DynamicField.objects.create(
id=1, refer="Author",
typo="Integer", name="author_age",
verbose_name=u"Age",
content_pane=self.main_info_pane
)
self.name = DynamicField.objects.create(
id=2, refer="Author",
name="author_name", verbose_name=u"Name",
content_pane=self.main_info_pane
)
self.information = DynamicField.objects.create(
id=3, refer="Author", name="author_information",
verbose_name=u"Information",
group=self.commics_authors.dynamicfieldgroup_ptr
)
self.dramatic_level = DynamicField.objects.create(
id=4, refer="Author", name="author_dramatic_level",
typo="Integer", verbose_name=u"Dramatic Level",
content_pane=self.main_info_pane,
group=self.dramatic_authors.dynamicfieldgroup_ptr
)
def test_assert_content_pane_fields(self):
self.assertQuerysetEqual(
self.main_info_pane.fields,
[
'<DynamicField: Dramatic Level>',
'<DynamicField: Name>',
'<DynamicField: Age>'
]
)
def test_assert_object_content_panes(self):
author = Author.objects.create(
author_age=42, author_name="some-name"
)
self.assertQuerysetEqual(
author.content_panes,
['<ContentPane: Main Info>']
)
def test_assert_groupped_content_panes(self):
author = Author.objects.create(
pk=777,
author_age=42, author_name="some-name"
)
author.author_groups.add(self.commics_authors)
author = Author.objects.get()
self.assertQuerysetEqual(
author.content_panes,
[
'<ContentPane: Commic Information Pane>',
'<ContentPane: Main Info>'
]
)
self.assertQuerysetEqual(
author.dynamic_fields,
[
'<DynamicField: Age>',
'<DynamicField: Name>',
'<DynamicField: Information>'
]
)
|
[
"luanfonceca@gmail.com"
] |
luanfonceca@gmail.com
|
c4397cb30b808d19f39e5dc639214ad89de14d75
|
8832f83436809e8e918e60e5526d95add9fe8dbd
|
/books_app/migrations/0068_auto_20190930_1758.py
|
4ad159bea80bec7a1af460bd8bd499d5f456283d
|
[] |
no_license
|
HCDigitalScholarship/booksofduchesses
|
e31e56eaba253b92a1362de5918b5b005cb27f3c
|
3f0e27515963c92a56714c5bada3b6a68a8665df
|
refs/heads/master
| 2022-12-09T18:41:20.019687
| 2021-10-25T14:58:18
| 2021-10-25T14:58:18
| 190,254,161
| 0
| 3
| null | 2022-12-08T05:21:54
| 2019-06-04T18:05:08
|
Python
|
UTF-8
|
Python
| false
| false
| 539
|
py
|
# Generated by Django 2.2.2 on 2019-09-30 17:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("books_app", "0067_auto_20190927_1626")]
operations = [
migrations.AlterField(
model_name="dateowned",
name="book_owned",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="books_app.Book",
),
)
]
|
[
"apjanco@gmail.com"
] |
apjanco@gmail.com
|
cbd2c64466a8223e6f012a4117d8d62e307a34fb
|
f2a967dfcb768ef2e5729b1f7665740dc8f4e09c
|
/tti/indicators/_relative_momentum_index.py
|
84f4a6dd01db9e19231be93ec64dd61f5df603f8
|
[
"MIT"
] |
permissive
|
Bill-Software-Engineer/trading-technical-indicators
|
799c44a88ee73fb33c0255cb3ff5463f9d578506
|
fc00008a41da54f160609343e866c72306f4962c
|
refs/heads/master
| 2023-03-24T20:13:16.088567
| 2021-03-24T05:50:58
| 2021-03-24T05:50:58
| 349,295,934
| 0
| 1
|
MIT
| 2021-03-20T04:31:57
| 2021-03-19T04:05:07
| null |
UTF-8
|
Python
| false
| false
| 6,267
|
py
|
"""
Trading-Technical-Indicators (tti) python library
File name: _relative_momentum_index.py
Implements the Relative Momentum Index technical indicator.
"""
import pandas as pd
from ._technical_indicator import TechnicalIndicator
from ..utils.constants import TRADE_SIGNALS
from ..utils.exceptions import NotEnoughInputData, WrongTypeForInputParameter,\
WrongValueForInputParameter
class RelativeMomentumIndex(TechnicalIndicator):
"""
Relative Momentum Index Technical Indicator class implementation.
Args:
input_data (pandas.DataFrame): The input data. Required input column
is ``close``. The index is of type ``pandas.DatetimeIndex``.
period (int, default=8): The past periods to be used for the
calculation of the indicator.
momentum_period (int, default=4): The momentum periods to be used for
the calculation of the indicator.
fill_missing_values (bool, default=True): If set to True, missing
values in the input data are being filled.
Attributes:
_input_data (pandas.DataFrame): The ``input_data`` after preprocessing.
_ti_data (pandas.DataFrame): The calculated indicator. Index is of type
``pandas.DatetimeIndex``. It contains one column, the ``rmi``.
_properties (dict): Indicator properties.
_calling_instance (str): The name of the class.
Raises:
WrongTypeForInputParameter: Input argument has wrong type.
WrongValueForInputParameter: Unsupported value for input argument.
NotEnoughInputData: Not enough data for calculating the indicator.
TypeError: Type error occurred when validating the ``input_data``.
ValueError: Value error occurred when validating the ``input_data``.
"""
def __init__(self, input_data, period=8, momentum_period=4,
fill_missing_values=True):
# Validate and store if needed, the input parameters
if isinstance(period, int):
if period > 0:
self._period = period
else:
raise WrongValueForInputParameter(
period, 'period', '>0')
else:
raise WrongTypeForInputParameter(
type(period), 'period', 'int')
if isinstance(momentum_period, int):
if momentum_period > 0:
self._momentum_period = momentum_period
else:
raise WrongValueForInputParameter(
momentum_period, 'momentum_period', '>0')
else:
raise WrongTypeForInputParameter(
type(momentum_period), 'momentum_period', 'int')
# Control is passing to the parent class
super().__init__(calling_instance=self.__class__.__name__,
input_data=input_data,
fill_missing_values=fill_missing_values)
def _calculateTi(self):
"""
Calculates the technical indicator for the given input data. The input
data are taken from an attribute of the parent class.
Returns:
pandas.DataFrame: The calculated indicator. Index is of type
``pandas.DatetimeIndex``. It contains one column, the ``rmi``.
Raises:
NotEnoughInputData: Not enough data for calculating the indicator.
"""
# Not enough data for the requested period
if len(self._input_data.index) < self._period + self._momentum_period:
raise NotEnoughInputData('Relative Momentum Index',
self._period + self._momentum_period,
len(self._input_data.index))
rmi = pd.DataFrame(index=self._input_data.index,
columns=['rmi', 'upc', 'dpc', 'smoothed_upc',
'smoothed_dpc'],
data=None, dtype='float64')
# Calculate price change (current close - close momentum periods ago)
close_price_change = self._input_data['close'] - self._input_data[
'close'].shift(self._momentum_period)
# Upward price change
rmi['upc'][close_price_change > 0] = close_price_change
rmi['upc'][close_price_change <= 0] = 0
# Downward price change
rmi['dpc'][close_price_change < 0] = abs(close_price_change)
rmi['dpc'][close_price_change >= 0] = 0
# Wilder's Moving Average for upc and dpc
rmi['smoothed_upc'].iat[self._period + self._momentum_period - 1] = \
rmi['upc'].iloc[
self._momentum_period:self._period + self._momentum_period].mean()
rmi['smoothed_dpc'].iat[self._period + self._momentum_period - 1] = \
rmi['dpc'].iloc[
self._momentum_period:self._period + self._momentum_period].mean()
for i in range(self._period + self._momentum_period,
len(self._input_data.index)):
rmi['smoothed_upc'].iat[i] = rmi['smoothed_upc'].iat[i - 1] + (
rmi['upc'].iat[i] - rmi['smoothed_upc'].iat[i - 1]
) / self._period
rmi['smoothed_dpc'].iat[i] = rmi['smoothed_dpc'].iat[i - 1] + (
rmi['dpc'].iat[i] - rmi['smoothed_dpc'].iat[i - 1]
) / self._period
# Calculate indicator
rmi['rmi'] = 100 * (rmi['smoothed_upc'] / rmi['smoothed_dpc']) / (
1 + rmi['smoothed_upc'] / rmi['smoothed_dpc'])
return rmi[['rmi']].round(4)
def getTiSignal(self):
"""
Calculates and returns the trading signal for the calculated technical
indicator.
Returns:
{('hold', 0), ('buy', -1), ('sell', 1)}: The calculated trading
signal.
"""
# Not enough data for trading signal
if len(self._ti_data.index) < 2:
return TRADE_SIGNALS['hold']
# Overbought region
if self._ti_data['rmi'].iat[-2] < 70. < self._ti_data['rmi'].iat[-1]:
return TRADE_SIGNALS['sell']
# Oversold region
if self._ti_data['rmi'].iat[-2] > 30. > self._ti_data['rmi'].iat[-1]:
return TRADE_SIGNALS['buy']
return TRADE_SIGNALS['hold']
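# Illustrative usage sketch (assumes the library is installed as ``tti`` and that
# ``df`` is a pandas.DataFrame with a ``close`` column and a DatetimeIndex; the
# variable names below are hypothetical, not part of this module):
#
#     from tti.indicators import RelativeMomentumIndex
#
#     rmi = RelativeMomentumIndex(input_data=df, period=8, momentum_period=4)
#     signal = rmi.getTiSignal()  # one of ('hold', 0), ('buy', -1), ('sell', 1)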
|
[
"vsaveris@gmail.com"
] |
vsaveris@gmail.com
|
f4b5f06d621a40f412a9d3abd8a0a2c8f2e1248b
|
26dcf8e0457156a8bde936d56a59e1099893f8c6
|
/tests/test_hmm.py
|
e97feff68370733dbcc69ffcce59a30cb789f25b
|
[
"MIT"
] |
permissive
|
SilenceWinter/MicroTokenizer
|
fc4212fb9a324e93e707edbe130b518bd782d07a
|
0b617f4b107743f6c7c473a9fac9408d21c56931
|
refs/heads/master
| 2020-03-29T04:31:23.050836
| 2018-09-18T16:40:28
| 2018-09-18T16:40:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `MicroTokenizer` package."""
import pytest
from MicroTokenizer.hmm import HMMTokenizer
def test_persist(tmpdir):
temp_path = tmpdir.mkdir("hmm")
temp_path_str = str(temp_path)
tokenizer = HMMTokenizer()
tokenizer.train_one_line(["我", "是", "中国人"])
tokenizer.train_one_line(["你", "打", "人"])
tokenizer.do_train()
tokenizer.persist_to_dir(temp_path_str)
assert len(temp_path.listdir()) == 3
@pytest.mark.parametrize("input_text", pytest.helpers.tokenizer_test_cases())
def test_segment(input_text):
tokenizer = HMMTokenizer()
tokenizer.load_model()
result = tokenizer.segment(input_text)
pytest.helpers.assert_token_equals(result, input_text)
|
[
"u1mail2me@gmail.com"
] |
u1mail2me@gmail.com
|
fc53892213fa66aece7218c6e1a0dc1a2b68968c
|
5c0a253bf2fb83db01abc99097871c965f4cf565
|
/spark/crm/PROC_A_CI_DEP_CONTRIBUTION.py
|
e899aff59721d52b99cbb6e4dbf84908af6f822b
|
[] |
no_license
|
airuibel/python-1
|
3b16553ede9d069ec56efbb12a89a4de6917a447
|
94f387e2d406fab2128bcfffce6146da720b2ccc
|
refs/heads/master
| 2020-07-05T15:43:00.957221
| 2017-09-17T14:05:48
| 2017-09-17T14:05:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,376
|
py
|
#coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_A_CI_DEP_CONTRIBUTION').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
# Dates needed for processing
etl_date = sys.argv[1]
# ETL date
V_DT = etl_date
# Previous day's date
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
# First day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
# Last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
# 10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
#---------------------------------------------------------------------------------------#
V_YEAR_MONTH = etl_date[0:4]+"-" + etl_date[4:6]
OCRM_F_CI_CON_PARM = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_CON_PARM/*')
OCRM_F_CI_CON_PARM.registerTempTable("OCRM_F_CI_CON_PARM")
ACRM_F_DP_SAVE_INFO = sqlContext.read.parquet(hdfs+'/ACRM_F_DP_SAVE_INFO/*')
ACRM_F_DP_SAVE_INFO.registerTempTable("ACRM_F_DP_SAVE_INFO")
# Task [21] 001-01::
V_STEP = V_STEP + 1
sql = """
SELECT CUST_ID as CUST_ID,
CUST_NAME as CUST_NAME,
ODS_ACCT_NO as ACCT_NO,
AA.ORG_ID as ORG_ID,
CYNO as CURR,
COALESCE(MONTH_RMB,0) as MONTH_AVG,
COALESCE(MVAL_RMB,0) as YEAR_AVG,
PRODUCT_ID as CONT_SUB_ID ,
cast(COALESCE(AA.AGREENMENT_RATE,AA.TD_IR_TP)/100/12 as decimal(24,6)) as MONTH_RATE ,
cast(COALESCE(B.INNER_PRICE,1)/12 as decimal(24,6)) as INNER_PRICE,
RUN_COST as RUN_COST ,
CASE WHEN (COALESCE(YEAR_AVG,0) *(cast(COALESCE(B.INNER_PRICE,1)/12 as decimal(24,6)) - cast(COALESCE(AA.AGREENMENT_RATE,AA.TD_IR_TP)/100/12 as decimal(24,6))) - COALESCE(RUN_COST,0))>0 THEN (COALESCE(YEAR_AVG,0) *(cast(COALESCE(B.INNER_PRICE,1)/12 as decimal(24,6)) - cast(COALESCE(AA.AGREENMENT_RATE,AA.TD_IR_TP)/100/12 as decimal(24,6))) - COALESCE(RUN_COST,0)) ELSE 0 END as CONTRIBUTION,
CASE WHEN (COALESCE(MVAL_RMB,0) *(cast(COALESCE(B.INNER_PRICE,1)/12 as decimal(24,6)) - cast(COALESCE(AA.AGREENMENT_RATE,AA.TD_IR_TP)/100/12 as decimal(24,6)))- COALESCE(RUN_COST,0))>0 THEN (COALESCE(MVAL_RMB,0) * (cast(COALESCE(B.INNER_PRICE,1)/12 as decimal(24,6)) - cast(COALESCE(AA.AGREENMENT_RATE,AA.TD_IR_TP)/100/12 as decimal(24,6)))- COALESCE(RUN_COST,0)) ELSE 0 END as CONTRIBUTION_RMB,
V_YEAR_MONTH as YEAR_MONTH,
V_DT as ODS_DATE,
COALESCE(BAL_RMB,0) as BAL_RMB,
AA.FR_ID as FR_ID,
'' as FR_NAME,
CUST_TYP as CUST_TYP
FROM
(SELECT A.CUST_ID,
A.CUST_TYP,
A.CUST_NAME,
A.ODS_ACCT_NO,
A.ORG_ID,
A.CYNO,
A.BAL_RMB,
A.MONTH_RMB,
A.MVAL_RMB,
A.MONTH_AVG,
A.YEAR_AVG,
A.PRODUCT_ID,
A.TD_IR_TP,
A.AGREENMENT_RATE,
(CASE WHEN A.ACCONT_TYPE = 'H' THEN 'H001'
WHEN A.PRODUCT_ID = '999TD000100' THEN 'D001'
WHEN A.PRODUCT_ID = '999TD110600' OR A.PRODUCT_ID = '999TD000600' THEN 'D002'
WHEN A.PRODUCT_ID = '999TD110700' OR A.PRODUCT_ID = '999TD000700' THEN 'D003'
WHEN A.ACCONT_TYPE = 'D' AND A.PERD_UNIT = '3' THEN 'D004'
WHEN A.ACCONT_TYPE = 'D' AND A.PERD_UNIT = '6' THEN 'D005'
WHEN A.ACCONT_TYPE = 'D' AND A.PERD_UNIT = '9' THEN 'D006'
WHEN A.ACCONT_TYPE = 'D' AND A.PERD_UNIT = '12' THEN 'D007'
WHEN A.ACCONT_TYPE = 'D' AND A.PERD_UNIT = '24' THEN 'D008'
WHEN A.ACCONT_TYPE = 'D' AND A.PERD_UNIT = '36' THEN 'D009'
WHEN A.ACCONT_TYPE = 'D' AND A.PERD_UNIT = '60' THEN 'D010'
WHEN A.ACCONT_TYPE = 'D' AND A.PERD_UNIT > '60' THEN 'D011' END
) AS SUB_ID,
A.FR_ID
FROM ACRM_F_DP_SAVE_INFO A
WHERE A.CUST_ID <> 'X9999999999999999999'
--AND A.FR_ID = V_FR_ID
) AA
LEFT JOIN OCRM_F_CI_CON_PARM B ON AA.SUB_ID = B.SUB_ID AND AA.FR_ID = B.ORG_ID
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
sql = re.sub(r"\bV_YEAR_MONTH\b", "'"+V_YEAR_MONTH+"'", sql)
ACRM_F_CI_DEP_CONTRIBUTION = sqlContext.sql(sql)
ACRM_F_CI_DEP_CONTRIBUTION.registerTempTable("ACRM_F_CI_DEP_CONTRIBUTION")
dfn="ACRM_F_CI_DEP_CONTRIBUTION/"+V_DT+".parquet"
ACRM_F_CI_DEP_CONTRIBUTION.cache()
nrows = ACRM_F_CI_DEP_CONTRIBUTION.count()
ACRM_F_CI_DEP_CONTRIBUTION.write.save(path=hdfs + '/' + dfn, mode='overwrite')
ACRM_F_CI_DEP_CONTRIBUTION.unpersist()
ret = os.system("hdfs dfs -rm -r /"+dbname+"/ACRM_F_CI_DEP_CONTRIBUTION/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert ACRM_F_CI_DEP_CONTRIBUTION lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
|
[
"cysuncn@126.com"
] |
cysuncn@126.com
|
efce074729bc329b7d3d2b62f933c18cd1893b4b
|
a4deea660ea0616f3b5ee0b8bded03373c5bbfa2
|
/executale_binaries/register-variants/cmovbeq_r64_r64.gen.vex.py
|
aac0087e2d26b58209d268bec75c7d8b5c9eaab4
|
[] |
no_license
|
Vsevolod-Livinskij/x86-64-instruction-summary
|
4a43472e26f0e4ec130be9a82f7e3f3c1361ccfd
|
c276edab1b19e3929efb3ebe7514489f66087764
|
refs/heads/master
| 2022-02-02T18:11:07.818345
| 2019-01-25T17:19:21
| 2019-01-25T17:19:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 159
|
py
|
import angr
proj = angr.Project('cmovbeq_r64_r64.exe')
print(proj.arch)
print(proj.entry)
print(proj.filename)
irsb = proj.factory.block(proj.entry).vex
irsb.pp()
|
[
"sdasgup3@illinois.edu"
] |
sdasgup3@illinois.edu
|
76d09e968cf645ce2067c816a6f63d03efb99692
|
4920b6c12dc2427036077d38ed8fa513130418a8
|
/bipad_api/test/test_inline_response20059.py
|
7f7f5f5d6b53b4e708645271de5f31afefb61347
|
[] |
no_license
|
laxmitimalsina/covid_dashboard
|
d51a43d3ba2ad8a9754f723383f6395c1dccdda5
|
ccba8a3f5dd6dbd2b28e2479bda6e581eb23805f
|
refs/heads/master
| 2023-05-29T15:07:32.524640
| 2021-05-03T11:15:43
| 2021-05-03T11:15:43
| 273,698,762
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 948
|
py
|
# coding: utf-8
"""
BIPAD API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import bipad_api
from bipad_api.models.inline_response20059 import InlineResponse20059 # noqa: E501
from bipad_api.rest import ApiException
class TestInlineResponse20059(unittest.TestCase):
"""InlineResponse20059 unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testInlineResponse20059(self):
"""Test InlineResponse20059"""
# FIXME: construct object with mandatory attributes with example values
# model = bipad_api.models.inline_response20059.InlineResponse20059() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"laxmitimalsina2017@gmail.com"
] |
laxmitimalsina2017@gmail.com
|
8c0e1b2b4be9161d9d7d0227d2503c37d44d22eb
|
0c7d7b24a8d453fc1a9c2f27a08f3c4cfa46ec3b
|
/recipes/sota/2019/lm_analysis/shuffle_segments.py
|
f7e4b68d3a3f817fbc7488b29cbead6bad1659ba
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
piEYj/wav2letter
|
e6ae462eeeb6a4374f8280c8fa15d8f194c60215
|
49fbb1392e69b5194c077df9847505ec995b4e3d
|
refs/heads/main
| 2023-09-06T01:08:48.837731
| 2021-11-12T14:13:41
| 2021-11-12T14:15:15
| 444,344,109
| 1
| 0
|
NOASSERTION
| 2022-01-04T08:37:19
| 2022-01-04T08:37:19
| null |
UTF-8
|
Python
| false
| false
| 3,394
|
py
|
import os
import random
import sys
from multiprocessing import Pool
import sox
align_file = sys.argv[1]
output_dir = sys.argv[2]
lines = []
with open(align_file) as fin:
lines = fin.readlines()
N_THREADS = 40
MIN_SIL_LENGTH = 0.13
TOLERANCE = 0.04
def process(parameters):
tid, n_samples = parameters
output_list = output_dir + "dev-other.{}.lst".format(tid)
with open(output_list, "w") as fout:
for i in range(tid * n_samples, min(len(lines), n_samples * (tid + 1))):
line = lines[i]
sp = line.split("\t")
filename = sp[0]
# print(filename)
# duration = sox.file_info.duration(filename)
alignments = sp[1].strip().split("\\n")
# Parse the alignments
chunk_starts = [0]
chunk_ends = []
words = []
cur_words = []
cur_end = 0
for i, alignment in enumerate(alignments):
sp = alignment.split()
begin = float(sp[2])
length = float(sp[3])
word = sp[4]
cur_end = begin + length
if i == 0:
continue
if word == "$":
if length > MIN_SIL_LENGTH:
chunk_ends.append(cur_end - TOLERANCE)
chunk_starts.append(cur_end - TOLERANCE)
words.append(" ".join(cur_words))
cur_words = []
continue
cur_words.append(word)
if len(cur_words) > 0:
chunk_ends.append(cur_end)
words.append(" ".join(cur_words))
else:
chunk_starts.pop()
# print(duration)
# print(chunk_starts)
# print(chunk_ends)
# print(words)
# Split the audios
order = list(range(len(chunk_starts)))
random.shuffle(order)
new_target = " ".join([words[i] for i in order])
new_audio_path = output_dir + filename.split("/")[-1]
fout.write(
"{}\t{}\t{}\t{}\n".format(
new_audio_path, new_audio_path, chunk_ends[-1] * 1000, new_target
)
)
if len(chunk_starts) == 1:
os.system("cp {} {}".format(filename, output_dir))
continue
paths = []
for i in order:
sox_tfm = sox.Transformer()
sox_tfm.set_output_format(
file_type="flac", encoding="signed-integer", bits=16, rate=16000
)
sox_tfm.trim(chunk_starts[i], chunk_ends[i])
new_path = "/tmp/{}_{}.flac".format(tid, i)
sox_tfm.build(filename, new_path)
paths.append(new_path)
# Combine them
sox_comb = sox.Combiner()
sox_comb.build(list(paths), new_audio_path, "concatenate")
if __name__ == "__main__":
n_sample_per_thread = len(lines) // N_THREADS + 1
print(
"Spreading {} threads with {} samples in each".format(
N_THREADS, n_sample_per_thread
)
)
pool = Pool(N_THREADS)
pool.map(process, zip(list(range(N_THREADS)), [n_sample_per_thread] * N_THREADS))
pool.close()
pool.join()
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
49f63466b6602a419c57f27b6e2d27a713646b02
|
56ade096db1fe376ee43d38c96b43651ee07f217
|
/033. Search in Rotated Sorted Array/Python/Solution.py
|
c1099f90447f301ab75df1e19f2814222c1d9484
|
[] |
no_license
|
xiaole0310/leetcode
|
c08649c3f9a9b04579635ee7e768fe3378c04900
|
7a501cf84cfa46b677d9c9fced18deacb61de0e8
|
refs/heads/master
| 2020-03-17T05:46:41.102580
| 2018-04-20T13:05:32
| 2018-04-20T13:05:32
| 133,328,416
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 788
|
py
|
class Solution(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
length = len(nums)
low = 0
high = length - 1
        # First locate the rotation pivot (index of the smallest element).
        while low < high:
mid = (low + high) // 2
if nums[mid] > nums[high]:
low = mid + 1
else:
high = mid
        rotate = low
        # Then binary search over the logical (un-rotated) order, mapping back via the pivot.
        low = 0
        high = length - 1
while low <= high:
mid = (low + high) // 2
real_mid = (mid + rotate) % length
if nums[real_mid] == target:
return real_mid
if nums[real_mid] < target:
low = mid + 1
else:
high = mid - 1
return -1
|
[
"zhantong1994@163.com"
] |
zhantong1994@163.com
|
91dcd5408203ac48530134b4c58374aad4842f14
|
e298bf40ae88c2bd8e0a07f3e92f3e08a92edcc6
|
/oslo_messaging/_drivers/zmq_driver/poller/green_poller.py
|
ab8f313f9fb8e6fd25990f75c2785310d39b15a9
|
[] |
no_license
|
KevinKaiQian/polar-bear
|
46a814c746246394f76505846166673a049f12f2
|
61d4e0ccd7328a6aa543af3b75e5f7fedf98bf8e
|
refs/heads/master
| 2022-04-29T02:15:35.536039
| 2021-05-19T12:33:07
| 2021-05-19T12:33:07
| 172,068,536
| 2
| 0
| null | 2022-03-29T21:56:51
| 2019-02-22T13:11:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,501
|
py
|
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import eventlet
from oslo_messaging._drivers.zmq_driver import zmq_poller
class GreenPoller(zmq_poller.ZmqPoller):
def __init__(self):
self.incoming_queue = eventlet.queue.LightQueue()
self.green_pool = eventlet.GreenPool()
self.thread_by_socket = {}
def register(self, socket, recv_method=None):
if socket not in self.thread_by_socket:
self.thread_by_socket[socket] = self.green_pool.spawn(
self._socket_receive, socket, recv_method)
def unregister(self, socket):
thread = self.thread_by_socket.pop(socket, None)
if thread:
thread.kill()
def _socket_receive(self, socket, recv_method=None):
while True:
if recv_method:
incoming = recv_method(socket)
else:
incoming = socket.recv_multipart()
self.incoming_queue.put((incoming, socket))
eventlet.sleep()
def poll(self, timeout=None):
try:
return self.incoming_queue.get(timeout=timeout)
except eventlet.queue.Empty:
return None, None
def close(self):
for thread in self.thread_by_socket.values():
thread.kill()
self.thread_by_socket = {}
class GreenExecutor(zmq_poller.Executor):
def __init__(self, method):
self._method = method
super(GreenExecutor, self).__init__(None)
self._done = threading.Event()
def _loop(self):
while not self._done.is_set():
self._method()
eventlet.sleep()
def execute(self):
self.thread = eventlet.spawn(self._loop)
def wait(self):
if self.thread is not None:
self.thread.wait()
def stop(self):
if self.thread is not None:
self.thread.kill()
def done(self):
self._done.set()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
5b868fc577cd8428158cebc51ec4b35b5f9e7d80
|
9970ab0ad1e805f83cc4463d008ee4654cfb668e
|
/tags/2.01/AStyleTest/file-py/file-extract.py
|
342109f0b3e5c350d23ed0312153fc5378c9d14d
|
[] |
no_license
|
svn2github/Artistic-Style
|
a464a7f6cc6bd11aec2a3452a9736e638630ecd8
|
6bd4db522937a182e63db96dbc095f2baae8a17a
|
refs/heads/master
| 2020-12-08T06:01:08.497124
| 2018-04-05T22:36:21
| 2018-04-05T22:36:21
| 67,278,407
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,425
|
py
|
#! /usr/bin/python
# Calls libextract to extract files in the TestArchives directory.
# Change the global variables to the desired values.
import libastyle #local directory
import libextract #local directory
import os
import sys
import time
# global variables ------------------------------------------------------------
# select one of the following from libastyle
# CODEBLOCKS
# CODELITE
# JEDIT
# KDEVELOP
# SCITE
# SHARPDEVELOP
# TESTPROJECT
project = libastyle.TESTPROJECT
# -----------------------------------------------------------------------------
def extract_project():
"""Call the library procedure to extract the requested project.
"""
starttime = time.time()
libextract.extract_project(project)
stoptime = time.time()
print_run_time(starttime, stoptime)
# -----------------------------------------------------------------------------
def print_run_time(starttime, stoptime):
"""Print run time for the test.
"""
runtime = int(stoptime - starttime + 0.5)
min = runtime / 60
sec = runtime % 60
if min == 0:
print "{0} seconds".format(sec)
else:
print "{0} min {1} seconds".format(min, sec)
# -----------------------------------------------------------------------------
# make the module executable
if __name__ == "__main__":
libastyle.set_text_color()
extract_project()
libastyle.system_exit()
# -----------------------------------------------------------------------------
|
[
"jimp03@1fe3c263-5997-42ff-936f-87a7378ef0cd"
] |
jimp03@1fe3c263-5997-42ff-936f-87a7378ef0cd
|
4304b728a6cae825d940ba9d9e818606ca8eb1b0
|
aaf045878465b2b26ff7ea12eb72453446cbd428
|
/flaskRESTful/app.py
|
e19033a24a3162b6c6ec4ffe77a40785473fac05
|
[] |
no_license
|
mishrakeshav/REST-APIs-with-Flask-and-Python
|
c35a0e61c75763459227079c524eaf1dceb078f3
|
2b9dfdcb8da8d487713cd85cee9ee0aa3e65d974
|
refs/heads/master
| 2022-04-24T23:06:52.675441
| 2020-04-30T07:57:54
| 2020-04-30T07:57:54
| 257,958,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,711
|
py
|
from flask import Flask,request
from flask_restful import Resource , Api, reqparse
from flask_jwt import JWT ,jwt_required
from security import authenticate,identity
app = Flask(__name__)
app.secret_key = "keshav"
api = Api(app)
jwt = JWT(app, authenticate, identity) # /auth
items = []
class Item(Resource):
parser = reqparse.RequestParser()
parser.add_argument(
'price',
type=float,
required = True,
help = "This field cannot be left blank"
)
@jwt_required()
def get(self,name):
item = next(filter(lambda x : x['name'] == name , items),None)
return {'item':item},200 if item else 404
def post(self,name):
if next(filter(lambda x : x['name'] == name,items),None):
return {"message":"An item with name {} already exists.".format(name)},400
data = Item.parser.parse_args()
item = {'name':name, 'price':data['price']}
items.append(item)
return item,201
def delete(self,name):
global items
items = list(filter(lambda x : x['name']!= name, items))
return {"message" : "item deleted"}
def put(self,name):
data = Item.parser.parse_args()
item = next(filter(lambda x : x['name'] == name , items), None)
if item is None:
item = {'name':name,'price': data['price']}
items.append(item)
else:
item.update(data)
return item
class ItemsList(Resource):
def get(self):
return {'items':items}
api.add_resource(Item, '/item/<string:name>')
api.add_resource(ItemsList, '/items')
if __name__ == "__main__":
app.run(debug=True)
|
[
"mishrakeshav@users.noreply.github.com"
] |
mishrakeshav@users.noreply.github.com
|
1d2033a62e150fe1e23310385b00a8eba8c7586f
|
2a7acc39c637824dd6974fa154664ef9eca4383e
|
/app/utils/validators.py
|
3734657aa887f9ffb80c401926e70d99a84f7645
|
[] |
no_license
|
pigmonchu/backfiles
|
e2a9415902f780708928065770486c6a8ee34e15
|
d0e8248d2949710f56d62777fb2a57727de39302
|
refs/heads/master
| 2022-10-04T12:10:50.078244
| 2019-08-26T22:30:20
| 2019-08-26T22:30:20
| 204,572,276
| 0
| 0
| null | 2022-09-16T18:08:43
| 2019-08-26T22:27:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
import importlib
from werkzeug.datastructures import MultiDict
import inspect
'''
Validation and error-message handling
'''
def validate_form(blueprint_import_path, class_name, data):
m_data = MultiDict(data)
module = importlib.import_module('{}.forms'.format(blueprint_import_path))
form_class = getattr(module, class_name)
form = form_class(m_data)
resp = form.validate()
return resp, form
class ResponseJSON():
__code__ = None
def __init__(self, data, status=False):
self.status = status
self.data = data
@property
def status(self):
return 'success' if self.__status__ else 'fail'
@status.setter
def status(self, value):
self.__status__ = value
def __repr__(self):
return 'Response <{}>:{}'.format(self.status, self.data)
'''
Serialize objects to JSON (public attributes only)
'''
def public_attr_to_dict(obj):
obj_dict = {}
for key, value in inspect.getmembers(obj):
if key[:2] != '__' and not inspect.ismethod(getattr(obj, key)):
obj_dict[key] = value
return obj_dict
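# Illustrative usage sketch ('app.users' and 'LoginForm' are hypothetical
# placeholders for a blueprint import path and a WTForms class defined in that
# blueprint's forms module; `request.form` assumes a Flask request context):
#
#     valid, form = validate_form('app.users', 'LoginForm', request.form)
#     response = ResponseJSON(form.errors if not valid else form.data, status=valid)
#     payload = public_attr_to_dict(response)  # {'data': ..., 'status': 'success' or 'fail'}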
|
[
"monterdi@gmail.com"
] |
monterdi@gmail.com
|
3b8c7c687f2e9d9835d220c82d5e677e59f7cea6
|
dcbe50ee6cb4dc108e71df95479d9fd6e868e4e6
|
/Torch/6_nn.py
|
990f6dd84670a000b7a0c511a2bad9919fc6d998
|
[] |
no_license
|
krishnakalyan3/DeepLearning-Experiments
|
1854821bb630a0ce2f4dea2423350c1b303d954b
|
7b7d9e9570e787b162c68e2734aa6b0c6567f257
|
refs/heads/master
| 2020-07-03T16:46:09.491095
| 2017-07-20T03:18:44
| 2017-07-20T03:18:44
| 74,243,657
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
#!/usr/bin/env python3
# https://github.com/PythonWorkshop/Intro-to-TensorFlow-and-PyTorch/blob/master/PyTorch%20Tutorial.ipynb
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.activation import Softmax
import torch.optim as optim
df = pd.read_csv('Data/winequality-red-cleaned.csv', sep=',')
y = pd.DataFrame([0. if item == 'Good' else 1. for item in df['category']])
X = df.drop(['quality', 'category'], axis=1)
# Train Test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
learning_rate = 0.005
|
[
"krishnakalyan3@gmail.com"
] |
krishnakalyan3@gmail.com
|
1aa7ae250a923bee4235a9567487ed406c8f8edd
|
e5873fabe08bac8c298026871bc3a562e330151e
|
/openrasp_iast/plugin/scanner/directory_basic.py
|
13fdb506795d49166c2fb0b801e70926a8e1ca03
|
[
"Apache-2.0"
] |
permissive
|
1u0Hun/openrasp-iast
|
7a93d33301fdeae8021f4742870068f2d09f62bb
|
8b98e4ffda52c3e04bfaa682dde219e78c87c21a
|
refs/heads/master
| 2020-11-30T13:57:55.088148
| 2019-12-20T06:42:08
| 2019-12-20T06:42:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,472
|
py
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
Copyright 2017-2019 Baidu Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from core.components.plugin import scan_plugin_base
class ScanPlugin(scan_plugin_base.ScanPluginBase):
plugin_info = {
"name": "directory_basic",
"show_name": "目录遍历检测插件",
"description": "基础目录遍历漏洞检测插件"
}
def mutant(self, rasp_result_ins):
"""
        Generate mutated test vectors (payloads) for a request.
"""
if not rasp_result_ins.has_hook_type("directory"):
return
linux_payload_list = [
("../../../../../../../../../../../../../../../../../../../../etc", "/etc"),
("../../../../etc", "/etc"),
("/etc", "/etc")
]
windows_payload_list = [
("..\\..\\..\\..\\..\\..\\..\\..\\..\\windows", ":\\windows"),
("c:\\windows", "c:\\windows")
]
mac_payload_list = [
("../../../../../../../../../../../../../../../../../../../../private/etc", "/private/etc"),
("../../../private/etc", "/private/etc"),
("/private/etc", "/private/etc")
]
server_os = rasp_result_ins.get_server_info()["os"]
if server_os == "Windows":
payload_list = windows_payload_list
elif server_os == "Mac":
payload_list = mac_payload_list
else:
payload_list = linux_payload_list
        # Collect all parameters to be tested
request_data_ins = self.new_request_data(rasp_result_ins)
test_params = self.mutant_helper.get_params_list(
request_data_ins, ["get", "post", "json", "headers", "cookies"])
for param in test_params:
if not request_data_ins.is_param_concat_in_hook("directory", param["value"].rstrip("/\\")):
continue
payload_seq = self.gen_payload_seq()
for payload in payload_list:
request_data_ins = self.new_request_data(rasp_result_ins, payload_seq, payload[1])
request_data_ins.set_param(param["type"], param["name"], payload[0])
hook_filter = [{
"type": "dir",
"filter": {
"code": payload[1]
}
}]
request_data_ins.set_filter(hook_filter)
request_data_list = [request_data_ins]
yield request_data_list
def check(self, request_data_list):
"""
        Check the result of the scan request.
"""
request_data_ins = request_data_list[0]
feature = request_data_ins.get_payload_info()["feature"]
rasp_result_ins = request_data_ins.get_rasp_result()
if rasp_result_ins is None:
return None
if self.checker.check_concat_in_hook(rasp_result_ins, "directory", feature):
return "读取的目录可被用户输入控制"
else:
return None
|
[
"350905402@qq.com"
] |
350905402@qq.com
|
5ab15084fd09e6973269b95c1650ba480596b272
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_2700486_0/Python/Venia/prob.py
|
86e3993e498a5d987a87e9293bcae14108d3f127
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,633
|
py
|
#from functools import lru_cache
from fractions import Fraction
from copy import copy
def sim(n):
s = {(0, 0)}
for i in range(20):
s.add((2*i+1, -1))
s.add((-2*i+1,-1))
d = [(s, 1)]
for i in range(n-1):
dd = []
for pos, P in d:
i = 0
while (0, i+2) in pos:
i += 2
if (1, i+1) in pos and (-1, i+1) in pos:
cp = copy(pos)
cp.add((0, i+2))
dd.append((cp, P))
elif (1, i+1) in pos and (-1, i+1) not in pos:
cp = copy(pos)
x, y = -1, i+1
while (x-1, y-1) not in pos:
x, y = x-1, y-1
cp.add((x, y))
dd.append((cp, P))
elif (1, i+1) not in pos and (-1, i+1) in pos:
cp = copy(pos)
x, y = 1, i+1
while (x+1, y-1) not in pos:
#print(x, y)
x, y = x+1, y-1
cp.add((x, y))
dd.append((cp, P))
else:
cp1 = copy(pos)
cp2 = copy(pos)
x, y = 1, i+1
while (x+1, y-1) not in pos:
#print(x, y)
x, y = x+1, y-1
cp1.add((x, y))
dd.append((cp1, P*Fraction(1, 2)))
x, y = -1, i+1
while (x-1, y-1) not in pos:
#print(x, y)
x, y = x-1, y-1
cp2.add((x, y))
dd.append((cp2, P*Fraction(1, 2)))
d = dd
return d
ds = [None] + [sim(i) for i in range(1, 21)]
def silly_prob(x, y, n):
d = ds[n]
res = 0
for poss, P in d:
if (x, y) in poss:
res += P
return res
T = int(raw_input())
for i in range(T):
n, x, y = map(int, raw_input().split())
print("Case #{}: {}".format(i+1, float(silly_prob(x, y, n))))
#@lru_cache(maxsize = None)
def prob(x, y, n):
print(x, y, n)
if y == -1 and x%2 == 1: return 1
elif n == 1:
return 1 if (x, y) == (0, 0) else 0
elif x < 0:
return prob(-x, y, n-1)
elif x > 0:
res = prob(x, y, n-1) # it's already there
if y != 0:
            return res + (1-prob(x, y, n-1)) * prob(x+1, y-1, n-1) # right down must be there
# if RD is there, LD and R are also there
return res
else:
# x = 0
#
return prob(x, y, n-1) + (1-prob(x, y,n-1))*prob(x-1, y-1, n-1)*prob(x+1, y-1, n-1)
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
13f1a79700d0944ac29487331e54040083749301
|
7d2f933ed3c54e128ecaec3a771817c4260a8458
|
/venv/Lib/site-packages/hamcrest/core/matcher.py
|
9694cdc65f3a3d50e5c962cf8660c9ebfdeb01ff
|
[] |
no_license
|
danielmoreira12/BAProject
|
c61dfb1d0521eb5a28eef9531a00e744bfb0e26a
|
859f588305d826a35cc8f7d64c432f54a0a2e031
|
refs/heads/master
| 2021-01-02T07:17:39.267278
| 2020-02-25T22:27:43
| 2020-02-25T22:27:43
| 239,541,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,663
|
py
|
from hamcrest.core.description import Description
from typing import Generic, Optional, TypeVar
from .selfdescribing import SelfDescribing
__author__ = "Jon Reid"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"
T = TypeVar("T")
class Matcher(Generic[T], SelfDescribing):
"""A matcher over acceptable values.
A matcher is able to describe itself to give feedback when it fails.
Matcher implementations should *not* directly implement this protocol.
Instead, *extend* the :py:class:`~hamcrest.core.base_matcher.BaseMatcher`
class, which will ensure that the
:py:class:`~hamcrest.core.matcher.Matcher` API can grow to support new
features and remain compatible with all
:py:class:`~hamcrest.core.matcher.Matcher` implementations.
"""
def matches(self, item: T, mismatch_description: Optional[Description] = None) -> bool:
"""Evaluates the matcher for argument item.
If a mismatch is detected and argument ``mismatch_description`` is
provided, it will generate a description of why the matcher has not
accepted the item.
:param item: The object against which the matcher is evaluated.
:param mismatch_description:
:returns: ``True`` if ``item`` matches, otherwise ``False``.
"""
raise NotImplementedError("matches")
def describe_mismatch(self, item: T, mismatch_description: Description) -> None:
"""Generates a description of why the matcher has not accepted the
item.
The description will be part of a larger description of why a matching
failed, so it should be concise.
This method assumes that ``matches(item)`` is ``False``, but will not
check this.
:param item: The item that the
:py:class:`~hamcrest.core.matcher.Matcher` has rejected.
:param mismatch_description: The description to be built or appended
to.
"""
raise NotImplementedError("describe_mismatch")
def describe_match(self, item: T, match_description: Description) -> None:
"""Generates a description of why the matcher has accepted the item.
The description may be part of a larger description of why a matching
failed, so it should be concise.
This method assumes that ``matches(item)`` is ``True``, but will not
check this.
:param item: The item that the
:py:class:`~hamcrest.core.matcher.Matcher` has accepted.
:param match_description: The description to be built or appended to.
"""
raise NotImplementedError("describe_match")
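# Illustrative sketch of the extension route recommended in the class docstring:
# concrete matchers extend hamcrest.core.base_matcher.BaseMatcher rather than
# implementing this protocol directly. The IsEven/even names below are
# hypothetical examples, not part of this module:
#
#     from hamcrest.core.base_matcher import BaseMatcher
#
#     class IsEven(BaseMatcher):
#         def _matches(self, item) -> bool:
#             return isinstance(item, int) and item % 2 == 0
#
#         def describe_to(self, description: Description) -> None:
#             description.append_text("an even integer")
#
#     def even() -> "IsEven":
#         return IsEven()
#
#     # assert_that(4, even())  # with assert_that imported from hamcrest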
|
[
"danielmoreira12@github.com"
] |
danielmoreira12@github.com
|
fe086916c88a8fb986cfa09b74afb9490397ab2f
|
86df6f8f4f3c03cccc96459ad82bcdf3bf942492
|
/lintcode/majority-number.py
|
e759f4f141d0ff3c9823992ea18f1ff927931f25
|
[] |
no_license
|
bdliyq/algorithm
|
369d1fd2ae3925a559ebae3fa8f5deab233daab1
|
e1c993a5d1531e1fb10cd3c8d686f533c9a5cbc8
|
refs/heads/master
| 2016-08-11T21:49:31.259393
| 2016-04-05T11:10:30
| 2016-04-05T11:10:30
| 44,576,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
#!/usr/bin/env python
# encoding: utf-8
# Question: http://www.lintcode.com/en/problem/majority-number/
class Solution:
"""
@param nums: A list of integers
@return: The majority number
"""
def majorityNumber(self, nums):
        # Boyer-Moore majority vote: keep a candidate and a counter.
if len(nums) == 0:
return 0
count = 1
last_num = nums[0]
for n in nums[1:]:
if n == last_num:
count += 1
else:
count -= 1
if count == 0:
last_num = n
count += 1
return last_num
if __name__ == '__main__':
s = Solution()
    print(s.majorityNumber([1, 1, 1, 1, 2, 2, 2]))
|
[
"liyongqiang01@baidu.com"
] |
liyongqiang01@baidu.com
|
3c227d101d4c7c8c94b047534ef9453524806f5f
|
6e95e9b6a1fc996ebcb46c44d4ef7678f762e4f7
|
/others/xiangmu/Shandong/淄博.py
|
bea8f6c3343a4e8c8f6c56153246b4f980f88462
|
[
"Apache-2.0"
] |
permissive
|
625781186/lgd_spiders
|
3a4d6917a01e446136e7aef4c92b9b7a1f8e498d
|
1c8680115beb42f4daaf6be71bf3fb14fcc2c255
|
refs/heads/master
| 2020-08-29T13:21:12.116395
| 2019-10-21T14:28:00
| 2019-10-21T14:28:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,699
|
py
|
# -*- coding: utf-8 -*-
import requests
import re
import time
from lxml import etree
import datetime
from common.update_mongo import Update
from common.update_mongo import Update2
from common.spider_class import TongyongSpider
now = datetime.datetime.now().strftime('%Y/%m/%d')
city = 'zibo'
class Producer(TongyongSpider):
def __init__(self, redis_db):
super(Producer, self).__init__(redis_db)
self.url = 'http://www.zbfdc.com.cn/web/building/list?page={}'
def get_links(self, url):
for i in range(1, 5):
try:
response = requests.get(url, headers=self.headers,timeout=40)
text = response.text
html = etree.HTML(text)
a_list = html.xpath('//ul[@class="list"]//li//a/@href')
for a in a_list:
print(a)
self.db.sadd(self.redis_db, a)
return
except Exception as e:
print(url, e)
def run(self):
for i in range(1, 705):
self.get_links(self.url.format(i))
time.sleep(0.5)
class Consumer(TongyongSpider):
def parse_detail(self, url):
for i in range(1, 5):
try:
response = requests.get(url, headers=self.headers,timeout=40)
text = response.text
if response.text == '{"success":false,"fieldErrors":null,"msg":"楼盘下无房屋","data":null}': return 1
html = etree.HTML(text)
position = re.sub(r'\s', '', ''.join(html.xpath('//div[@class="building-title"]//text()')))
ul = html.xpath('//ul[@class="clearfix"]')[0]
pro_name = re.sub(r'\s', '', ''.join(ul.xpath('./li[1]/span[2]//text()')))
company = re.sub(r'\s', '', ''.join(ul.xpath('./li[7]/span[2]//text()')))
area = re.sub(r'\s', '', ''.join(ul.xpath('./li[8]/span[2]//text()')))
ca_num = re.sub(r'\s', '', ''.join(ul.xpath('./li[9]/span[2]//text()')))
sale_num = re.sub(r'\s', '', ''.join(ul.xpath('./li[10]/span[2]//text()')))
yongdi_time = re.sub(r'\s', '', ''.join(ul.xpath('./li[2]/span[2]//text()')))
yongdi_time = re.search(r'(20\d\d)', yongdi_time)
yongdi_time = yongdi_time.group(1) if yongdi_time else ''
gongcheng_time = re.sub(r'\s', '', ''.join(ul.xpath('./li[4]/span[2]//text()')))
gongcheng_time = re.search(r'(20\d\d)', gongcheng_time)
gongcheng_time = gongcheng_time.group(1) if gongcheng_time else ''
pan_time = ''
price = ''
ca_time = ''
build = (pro_name, ca_num, ca_time, pan_time, sale_num, area, price, position, company, now, url)
print(build)
Update2(build, city)
return 1
except Exception:
print('解析详情页异常')
if i == 4:
return 1
def run(self):
while True:
set_num = self.db.scard(self.redis_db)
if set_num == 0:
print('数目为0')
time.sleep(10)
set_num2 = self.db.scard(self.redis_db)
if set_num2 == 0: return
link = self.db.spop(self.redis_db)
num = self.parse_detail(link)
if num == 1:
time.sleep(0.5)
pass
else:
self.db.sadd(self.redis_db, link)
def run():
p = Producer('SdZibo:Detail')
p.run()
c = Consumer('SdZibo:Detail')
c.run()
if __name__ == '__main__':
run()
|
[
"lgdupup"
] |
lgdupup
|
fbd90a1183ff5f2c498044ca317aadebbf6dab6c
|
2f63688febd21dc3ae6b19abfa79ad313c820154
|
/0063_Unique_Paths_II/try_2.py
|
52c31b667efe89f6f4c2c7c52a459bda0a3052c2
|
[] |
no_license
|
novayo/LeetCode
|
cadd03587ee4ed6e35f60294070165afc1539ac8
|
54d0b3c237e0ffed8782915d6b75b7c6a0fe0de7
|
refs/heads/master
| 2023-08-14T00:35:15.528520
| 2023-07-30T05:56:05
| 2023-07-30T05:56:05
| 200,248,146
| 8
| 1
| null | 2022-11-19T04:37:54
| 2019-08-02T14:24:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
class Solution:
def uniquePathsWithObstacles(self, obstacleGrid: List[List[int]]) -> int:
'''
        If the cell above or to the left is an obstacle, it contributes 0 to the path count.
'''
dp = [[0 for j in range(len(obstacleGrid[0]))] for i in range(len(obstacleGrid))]
# 1th row
flag = True
for j in range(len(obstacleGrid[0])):
if flag and obstacleGrid[0][j] == 0:
dp[0][j] = 1
else:
dp[0][j] = 0
flag = False
# 1th col
flag = True
for i in range(len(obstacleGrid)):
if flag and obstacleGrid[i][0] == 0:
dp[i][0] = 1
else:
dp[i][0] = 0
flag = False
# loop remain
for i in range(1, len(obstacleGrid)):
for j in range(1, len(obstacleGrid[0])):
if obstacleGrid[i][j] == 1:
continue
dp[i][j] = dp[i-1][j] + dp[i][j-1]
return dp[-1][-1]
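# Illustrative driver, assuming `from typing import List` accompanies the class (as on LeetCode):
if __name__ == "__main__":
    grid = [[0, 0, 0],
            [0, 1, 0],
            [0, 0, 0]]
    print(Solution().uniquePathsWithObstacles(grid))  # expected: 2 paths around the single obstacle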
|
[
"f14051172@gs.ncku.edu.tw"
] |
f14051172@gs.ncku.edu.tw
|
bd955766f2c05e3737ab33a80c4b5c543ad7629c
|
59f03c7528c9c806e3e25b9864db89f25dfa73c2
|
/tests/onegov/swissvotes/test_views_exceptions.py
|
968fb33a50f3a189a70f68d75e43cdb109cc258f
|
[
"MIT"
] |
permissive
|
OneGov/onegov-cloud
|
8d8cd6d0378991ebc2333b62337246719102e723
|
c706b38d5b67692b4146cdf14ef24d971a32c6b8
|
refs/heads/master
| 2023-08-24T15:37:52.536958
| 2023-08-24T14:15:54
| 2023-08-24T14:15:54
| 189,431,418
| 17
| 4
|
MIT
| 2023-09-14T20:39:37
| 2019-05-30T14:47:14
|
Python
|
UTF-8
|
Python
| false
| false
| 442
|
py
|
from webtest import TestApp as Client
def test_view_exceptions(swissvotes_app):
client = Client(swissvotes_app)
client.get('/locale/de_CH').follow()
assert (
"Sie versuchen eine Seite zu öffnen, für die Sie nicht autorisiert "
"sind"
) in client.get('/votes/update', status=403)
assert (
"Die angeforderte Seite konnte nicht gefunden werden."
) in client.get('/abstimmungen', status=404)
|
[
"denis.krienbuehl@seantis.ch"
] |
denis.krienbuehl@seantis.ch
|
24b7be1f03ebf1b37b40a37b19178385c6947d2b
|
ea1ec59c934acc6dfbaa4c8e63349b2d391e9c25
|
/pandas-ta-quant-plot/pandas_ta_quant_plot/__init__.py
|
adc97cab839b9e24088969636475747502329f41
|
[
"MIT"
] |
permissive
|
Allensmile/pandas-ml-quant
|
475c09573b47ea3d589c94644edbd85d9d1917b2
|
59b702307c1842b0b89b5cbf755c1296da97b00a
|
refs/heads/master
| 2023-06-19T00:24:13.263411
| 2021-06-19T09:30:05
| 2021-06-19T09:30:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
"""Augment pandas DataFrame with methods for quant analysis plotting"""
__version__ = '0.2.0'
from collections import namedtuple
from pandas_ta_quant_plot.plots import *
from pandas.core.base import PandasObject
from pandas_ta_quant_plot.ta_plot_context import PlotContext
_ta = getattr(PandasObject, "ta", None)
if _ta is not None:
if getattr(_ta, "plot", None) is None:
setattr(PandasObject, "plot", lambda self, *args, **kwargs: PlotContext(self, *args, **kwargs))
else:
ta = namedtuple("TA", ["plot"])
setattr(PandasObject, "ta", lambda self, *args, **kwargs: ta(plot=PlotContext(self, *args, **kwargs)))
|
[
"kic@kic.kic"
] |
kic@kic.kic
|
a12fa220e2127d65695380961b8a907ca2a9640b
|
6e9a420485b25684b178984fd90f74bbe3ab5b58
|
/api/vm/define/vm_define_nic.py
|
77484d97193ab6c5c1f3b42141c180c0d77bf09d
|
[
"Apache-2.0"
] |
permissive
|
BillTheBest/esdc-ce
|
d367a88685548d41672c773364484ca7f82c4a26
|
ab71bf9dc66fb78a0de724077c02c58bc7d970ec
|
refs/heads/master
| 2021-04-26T23:34:06.535783
| 2018-02-25T19:19:15
| 2018-02-25T19:19:15
| 124,016,413
| 1
| 0
|
Apache-2.0
| 2018-03-06T03:41:33
| 2018-03-06T03:41:32
| null |
UTF-8
|
Python
| false
| false
| 4,253
|
py
|
from django.db.transaction import atomic
from api import status as scode
from api.utils.db import get_listitem
from api.task.response import SuccessTaskResponse, FailureTaskResponse
from api.vm.define.utils import is_vm_operational
from api.vm.define.api_views import VmDefineBaseView
from api.vm.define.serializers import VmDefineNicSerializer
from api.vm.messages import LOG_NIC_CREATE, LOG_NIC_UPDATE, LOG_NIC_DELETE
NIC_ID_MIN = 0
NIC_ID_MAX = 5
def _nic_params(fun):
"""Decorator for nic functions below"""
def wrap(view, vm, nic_id, *args, **kwargs):
if nic_id is None and view.diff:
return SuccessTaskResponse(view.request, view.get_diff(vm))
if view.active:
vm.revert_active(json_only=True)
if nic_id is None:
nic = vm.json_get_nics()
nics = None
kwargs['many'] = True
else:
nics, nic = get_listitem(view.request, vm.json_get_nics(), nic_id, name='VM NIC',
max_value=NIC_ID_MAX, min_value=NIC_ID_MIN)
return fun(view, vm, nic_id, nics, nic, *args, **kwargs)
return wrap
class VmDefineNicView(VmDefineBaseView):
def get_diff(self, vm):
"""Show nic differences between active and in db json. Implies full and denies active vm_define_nic."""
def_current = VmDefineNicSerializer(self.request, vm, vm.json_get_nics(), nic_id=None, many=True).data
def_active = VmDefineNicSerializer(self.request, vm, vm.json_active_get_nics(), nic_id=None, many=True).data
return self._diff_lists(def_active, def_current)
# noinspection PyUnusedLocal
@_nic_params
def get(self, vm, nic_id, nics, nic, data, many=False):
"""Get VM nic definition"""
ser = VmDefineNicSerializer(self.request, vm, nic, nic_id=nic_id, many=many)
return SuccessTaskResponse(self.request, ser.data, vm=vm)
# noinspection PyUnusedLocal
@is_vm_operational
@atomic
@_nic_params
def post(self, vm, nic_id, nics, nic, data):
"""Create VM nic definition"""
ser = VmDefineNicSerializer(self.request, vm, nic_id=nic_id, data=data)
if ser.is_valid():
nics[nic_id] = ser.jsondata
vm.resolvers = ser.resolvers
vm.save_nics(nics, monitoring_ip=ser.get_monitoring_ip())
res = SuccessTaskResponse(self.request, ser.data,
status=scode.HTTP_201_CREATED, vm=vm,
detail='nic_id=' + str(nic_id + 1), detail_dict=ser.detail_dict(),
msg=LOG_NIC_CREATE)
ser.save_ip(res.data.get('task_id')) # Always save ip.vm
return res
return FailureTaskResponse(self.request, ser.errors, vm=vm)
@is_vm_operational
@atomic
@_nic_params
def put(self, vm, nic_id, nics, nic, data):
"""Update VM nic definition"""
ser = VmDefineNicSerializer(self.request, vm, nic.copy(), nic_id=nic_id, data=data, partial=True)
if ser.is_valid():
nics[nic_id].update(ser.jsondata)
vm.resolvers = ser.resolvers
vm.save_nics(nics, monitoring_ip=ser.get_monitoring_ip())
res = SuccessTaskResponse(self.request, ser.data, vm=vm,
detail='nic_id=' + str(nic_id + 1), detail_dict=ser.detail_dict(),
msg=LOG_NIC_UPDATE)
ser.update_ip(res.data.get('task_id')) # Always update ip.vm
return res
return FailureTaskResponse(self.request, ser.errors, vm=vm)
# noinspection PyUnusedLocal
@is_vm_operational
@atomic
@_nic_params
def delete(self, vm, nic_id, nics, nic, data):
"""Delete VM nic definition"""
ser = VmDefineNicSerializer(self.request, vm, nic)
del nics[nic_id]
vm.save_nics(nics, monitoring_ip=ser.get_monitoring_ip(delete=True))
res = SuccessTaskResponse(self.request, None, vm=vm,
detail='nic_id=' + str(nic_id + 1),
msg=LOG_NIC_DELETE)
ser.delete_ip(res.data.get('task_id')) # Set ip.vm to None
return res
|
[
"daniel@kontsek.sk"
] |
daniel@kontsek.sk
|
12931f85fab5e70495912647ce9e0e73a0b33b5f
|
7f114a1fb511b816c116d5b9e67cb998e3e23956
|
/PyproS42.py
|
2376562757fa81b9b049f4905404b79c591ba9a8
|
[] |
no_license
|
Bharanij27/bharanirep
|
90ac34eb28deaa7ec96d042de456de71b96866d7
|
982133a7939c889d433c178a601441fa087293d9
|
refs/heads/master
| 2021-08-07T20:22:36.244395
| 2020-06-05T04:58:10
| 2020-06-05T04:58:10
| 186,580,768
| 0
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
n,k=map(int,input().split())
l=list(map(int,input().split()))
m=ma=-99999999999
def split(l,i):
m=(max(min(l[:i]),min(l[i:])))
return m
for i in range(1,n):
m=split(l,i)
if m>ma: ma=m
print(ma)
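# Worked trace (illustrative): n = 4, l = [1, 5, 3, 2]  (k is read but never used)
#   i = 1 -> max(min([1]),       min([5, 3, 2])) = max(1, 2) = 2
#   i = 2 -> max(min([1, 5]),    min([3, 2]))    = max(1, 2) = 2
#   i = 3 -> max(min([1, 5, 3]), min([2]))       = max(1, 2) = 2
# Printed answer: 2, the maximum over all splits of the larger of the two halves' minima.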
|
[
"noreply@github.com"
] |
Bharanij27.noreply@github.com
|
80e7408c473f7878fa0a47f087e8e936739924c4
|
e1efc8e0b0e4629dea61504fbc816c0527691bd9
|
/15.Tomcat/Tomcat16-Connector组件.py
|
394d6907603f9e207e89c25c721ef85e84967fe6
|
[] |
no_license
|
xiongmengmeng/xmind-technology
|
2bb67a0bf92cfd660cac01f8ab3a2454423ccba5
|
e2fdb6987ef805a65f0a4feb52d84383853f4b77
|
refs/heads/main
| 2023-07-31T07:10:29.868120
| 2021-09-11T08:18:17
| 2021-09-11T08:18:17
| 307,636,242
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,537
|
py
|
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import xmind
from xmind.core.markerref import MarkerId
xmind_name="tomcat"
w = xmind.load(os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
s2=w.createSheet()
s2.setTitle("Connector组件")
r2=s2.getRootTopic()
r2.setTitle("Connector组件")
content={
'HTTP阻塞模式协议——Http11Protocol':[
{'套接字接收终端——JIoEndpoint':[
'端口监听客户端请求,接收套接字连接,提供一个线程池处理接收到的套接字连接,负责对连接数的控制,负责安全与非安全套接字连接的实现等',
{'LimitLatch(连接数控制器)':[
'控制套接字连接个数->控制流',
'BIO模式,连接数:线程数=1:1',
'默认情况,Tomcat处理连接池的线程数为200->BIO流量控制阀门大小也默认为200'
]},
{'Acceptor(套接字接收器)':[
'监听是否有客户端套接字连接并接收套接字',
'将套接字交由Executor执行'
]},
{'ServerSocketFactory套接字工厂':[
'接收终端安全配置不同,套接字不同,引入了工厂模'
]},
{'Executor任务执行器':[
'使用JUC工具包的ThreadPoolExecutor类'
]},
{'SocketProcessor(任务定义器)':[
'处理套接字并响应客户端',
'连接数计数器减1',
'关闭套接字'
]}
]},
{'HTTP阻塞处理器——Http11Processor':[
'套接字的读写和过滤,请求报文解析,生成Request对象,响应内容解析,生成Response对象',
'套接字输入缓冲装置——InternalInputBuffer',
'4个过滤器:IdentityInputFilter、VoidInputFilter、BufferedInputFilter、ChunkedInputFilter',
{'套接字输出缓冲装置——InternalOutputBuffer':[
'OutputStream:套接字的输出通道,通过其将字节写入到操作系统底层',
'OutputStreamOutputBuffer:提供字节流输出的通道,与OutputFilter组合实现过滤效果',
'OutputFilter:过滤器组件',
'ByteChunk:为某个流添加缓冲功能'
]}
]}
],
'HTTP非阻塞模式协议——Http11NioProtocol':[
{'非阻塞接收终端——NioEndpoint':[
'LimitLatch(连接数控制器):对于NIO模式,Tomcat默认流量阀门为10 000',
'Acceptor(套接字接收器):负责接收套接字连接并注册到通道队列里面',
'Poller(轮询器):负责轮询检查事件列表',
{'Poller(轮询器)':[
'负责轮询检查事件列表',
'内部依赖JDK的Selector对象进行轮询,选择出待处理的事件,每轮询一次就选出若干需要处理的通道'
]},
'Poller池:包含了若干Poller组件',
{'SocketProcessor(任务定义器)':[
'用NIO方式读取套接字并进行处理,输出响应报文',
'连接数计数器减一腾出通道',
'关闭套接字'
]},
'Executor(任务执行器)'
]},
{'HTTP非阻塞处理器——Http11NioProcessor':[
'提供了对HTTP协议非阻塞模式的处理,作用同Http11Processor'
]}
]
}
# Build the xmind mind map from the content dict
xmind.build(content,r2)
# Save the xmind workbook
xmind.save(w,os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
|
[
"xiongmengmeng@qipeipu.com"
] |
xiongmengmeng@qipeipu.com
|
39196a1a8ee52ea5b7922cb7fe6d55035522c25f
|
a821e5a6e45665f7e219e3e3ed07c150219e4add
|
/exercicio87.py
|
15af26a0b52b2b4d33419be908b3021ddf0dc2b6
|
[] |
no_license
|
andreplacet/exercicios_python
|
18a28af942eb2bb211438f0aca10d651b7324fe5
|
0affe524e99f7739b08fdf58e2b54c5b577c8624
|
refs/heads/master
| 2020-08-29T02:05:52.850805
| 2020-06-01T19:09:50
| 2020-06-01T19:09:50
| 217,887,722
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 954
|
py
|
lista1 = []
lista2 = []
lista3 = []
matriz = []
somapares = somacoluna = 0
for c in range(0, 3):
lista1.append(int(input(f'Digite um valor para a [linha 0 coluna {c}]: ')))
matriz.append(lista1[:])
for i in range(0, 3):
lista2.append(int(input(f'Digite um valor para a [linha 1 coluna {i}]: ')))
matriz.append(lista2[:])
for d in range(0, 3):
lista3.append(int(input(f'Digite um valor para a [linha 2 coluna {d}]: ')))
matriz.append(lista3[:])
for num in matriz:
print(f'[ {num[0]} ] [ {num[1]} ] [ {num[2]} ]')
for par in matriz:
for j in range(0, len(par)):
if par[j] % 2 ==0:
somapares += par[j]
for colunaum in matriz:
somacoluna += colunaum[2]
print('-=' * 20)
print(f'A soma de todos os valores pares é: {somapares}')
print(f'A soma dos valores da terceira coluna é: {somacoluna}')
print(f'O maior valor da segunda linha é {max(lista2)}')
print('-=' * 20)
print('\033[33mFinalizado com Sucesso!\033[m')
|
[
"andreplacet@gmail.com"
] |
andreplacet@gmail.com
|
b0567b632314b432d96a5ec767e7e49d16a3a590
|
1f32af53c3f0d1cf1176e72a887135e1e5309e4b
|
/en/1_dqn/dqn.py
|
f55918bfaba1c556492ae116865e843d11dc1e8d
|
[
"MIT"
] |
permissive
|
seungjaeryanlee/rainbow-ride
|
bff020c61fd86d03993d4f7c68d965d071753105
|
29b0af19f5cc3d41433a8b405e736bc49309f540
|
refs/heads/master
| 2020-03-20T23:30:51.723184
| 2018-06-22T04:43:45
| 2018-06-22T04:43:47
| 137,849,945
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 459
|
py
|
import torch.nn as nn
class DQN(nn.Module):
"""
A simple Deep Q-Network with fully connected layers.
"""
def __init__(self, input_dims, output_dims):
super().__init__()
self.layers = nn.Sequential(
nn.Linear(input_dims, 128),
nn.ReLU(),
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, output_dims)
)
def forward(self, x):
return self.layers(x)
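# Illustrative usage sketch; the dimensions are assumptions (e.g. a CartPole-like task
# with 4 observation dims and 2 actions), not values fixed by this repository.
if __name__ == "__main__":
    import torch
    net = DQN(input_dims=4, output_dims=2)
    states = torch.randn(32, 4)        # a batch of 32 observations
    q_values = net(states)             # shape (32, 2): one Q-value per action
    print(q_values.shape)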
|
[
"seungjaeryanlee@gmail.com"
] |
seungjaeryanlee@gmail.com
|
af6517f8f612bd6daa93525c115f29b30d940596
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02997/s415838912.py
|
f36fccc0a8389864ad1d55d8962f2354c082d9bc
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
N, K = map(int, input().split())
edges = [(1, to) for to in range(2, N + 1)]
M = (N - 1) * (N - 2) // 2
if K > M:
print(-1)
exit()
for fr in range(2, N + 1):
for to in range(fr + 1, N + 1):
if M == K:
break
edges.append((fr, to))
M -= 1
print(len(edges))
for fr ,to in edges:
print(fr ,to)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
c738a11291e2651813c258b3b4ef5316c6a08b75
|
3076bd73c41ed665c987d99218b8a3599fa05ec2
|
/tests/test_hopfield_net.py
|
c3a7716eb7e004205ee9b22386d2acd827f3efe6
|
[
"Apache-2.0"
] |
permissive
|
lantunes/cellpylib
|
5135a6986e68424d9ec8b09fb42421b3dcf046d1
|
743e936d48f8520f6f4ac652570ac7bb46414189
|
refs/heads/master
| 2023-03-07T03:31:32.380400
| 2023-02-21T12:34:28
| 2023-02-21T12:34:28
| 126,618,694
| 203
| 32
|
Apache-2.0
| 2023-02-15T03:40:38
| 2018-03-24T16:33:15
|
Python
|
UTF-8
|
Python
| false
| false
| 2,486
|
py
|
import unittest
import cellpylib as cpl
import numpy as np
import os
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class TestHopfieldNet(unittest.TestCase):
def test_hopfield_net(self):
np.random.seed(0)
# patterns for training
zero = [
0, 1, 1, 1, 0,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
0, 1, 1, 1, 0,
0, 0, 0, 0, 0]
one = [
0, 1, 1, 0, 0,
0, 0, 1, 0, 0,
0, 0, 1, 0, 0,
0, 0, 1, 0, 0,
0, 0, 1, 0, 0,
0, 0, 1, 0, 0,
0, 0, 0, 0, 0]
two = [
1, 1, 1, 0, 0,
0, 0, 0, 1, 0,
0, 0, 0, 1, 0,
0, 1, 1, 0, 0,
1, 0, 0, 0, 0,
1, 1, 1, 1, 1,
0, 0, 0, 0, 0]
# replace the zeroes with -1 to make these vectors bipolar instead of binary
one = [-1 if x == 0 else x for x in one]
two = [-1 if x == 0 else x for x in two]
zero = [-1 if x == 0 else x for x in zero]
P = [zero, one, two]
hopfield_net = cpl.HopfieldNet(num_cells=35)
hopfield_net.train(P)
expected_weights = self._convert_to_ndarray("hopfield_net_weights.txt")
np.testing.assert_equal(expected_weights, hopfield_net.W)
expected_activities = self._convert_to_ndarray("hopfield_net.ca")
half_two = [
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 1, 1, 0, 0,
1, 0, 0, 0, 0,
1, 1, 1, 1, 1,
0, 0, 0, 0, 0]
half_two = [-1 if x == 0 else x for x in half_two]
cellular_automaton = np.array([half_two])
cellular_automaton = cpl.evolve(cellular_automaton, timesteps=155,
apply_rule=hopfield_net.apply_rule, r=hopfield_net.r)
np.testing.assert_equal(expected_activities, cellular_automaton)
def _convert_to_ndarray(self, filename, dtype=int):
with open(os.path.join(THIS_DIR, 'resources', filename), 'r') as content_file:
content = content_file.read()
content = content.replace('[[', '')
content = content.replace(']]', '')
content = content.replace('[', '')
content = content.replace('],', ';')
content = [[dtype(i) for i in x.split(',')] for x in content.split(';')]
return np.array(content)
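# Standalone sketch of the Hebbian (outer-product) rule that Hopfield training is based on;
# this is independent of cellpylib and only illustrates the idea behind HopfieldNet.train,
# not its exact implementation.
def _hebbian_weights_sketch(patterns):
    P = np.array(patterns, dtype=float)   # shape (n_patterns, n_cells), bipolar +/-1 entries
    n_cells = P.shape[1]
    W = P.T @ P / n_cells                 # sum of outer products, scaled by the number of cells
    np.fill_diagonal(W, 0)                # no self-connections
    return W

def _recall_sketch(W, state, steps=20):
    s = np.array(state, dtype=float)
    for _ in range(steps):
        s = np.sign(W @ s)                # synchronous update of every cell
        s[s == 0] = 1.0                   # break ties toward +1
    return s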
|
[
"lantunes@gmail.com"
] |
lantunes@gmail.com
|
ce6d34048467bbdff945b612bc3eba00b13c0baf
|
dd9de22427fd78910bdb6bff79b69dfb39d233d1
|
/accounts/urls.py
|
7f25807c9702aac4b0e2e9c2dae99ea84b018267
|
[] |
no_license
|
sulembutproton/joinsys
|
a21162d9d887194d3f252fc14da8adf538bd5c30
|
729fd046446b9389dab3a3cca25a50ddfb173af0
|
refs/heads/master
| 2023-04-17T01:59:14.639572
| 2021-04-28T17:44:25
| 2021-04-28T17:44:25
| 362,556,466
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
from django.contrib.auth import views as auth_views
from django.urls import path
from . import views
urlpatterns = [
path('register/', views.Register.as_view(), name='register'),
path('login/', views.user_login, name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='accounts/logout.html'), name='logout'),
path('settings/<int:pk>', views.AccountSettings.as_view(), name='settings'),
path('dashboard/', views.Dashboard.as_view(), name='dashboard'),
]
|
[
"sulembut@protonmail.com"
] |
sulembut@protonmail.com
|
cba9ce61d1502a557bd65c9c39a8c4939f68e3e6
|
e972dc486e62152981177f85b5f9cff919ac0867
|
/et_wiki/wiki_comp.py
|
a9e8c24fbdb4fa2fc5c2dca405063eee2b8bf36a
|
[] |
no_license
|
yeongsunpark/cute
|
d81b9b03f747f65bed742b10b2f9a59f69efea96
|
d69f918f9a1f1d6db70bc62272fc0ce582d7bf50
|
refs/heads/master
| 2020-03-27T12:43:41.728918
| 2019-04-29T04:41:47
| 2019-04-29T04:41:47
| 146,564,948
| 0
| 2
| null | 2018-11-06T07:45:59
| 2018-08-29T07:52:20
|
Python
|
UTF-8
|
Python
| false
| false
| 249
|
py
|
f2 = open("new_chunk.txt", "w")
with open("/data1/we_kor/kowiki_pages_170620_sent_chunk_10.tsv", "r") as f:
for line in f:
item = line.split("\t")
title = item[1]
f2.write(title)
f2.write("\n")
f2.close()
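# Alternative sketch using context managers for both files (not called above; the
# filenames are kept from the original and the input path is environment-specific).
def extract_titles(in_path="/data1/we_kor/kowiki_pages_170620_sent_chunk_10.tsv",
                   out_path="new_chunk.txt"):
    with open(in_path, "r") as src, open(out_path, "w") as dst:
        for line in src:
            dst.write(line.split("\t")[1] + "\n")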
|
[
"ylunar@naver.com"
] |
ylunar@naver.com
|
854c291b441bfdbf6c527767955aed060484ef1c
|
a5d22c99e781270317078f8980c934bcc71e6e8b
|
/neodroidvision/detection/single_stage/ssd/bounding_boxes/__init__.py
|
39a56c4c514c00d0e79606e1f538b09770dd807b
|
[
"Apache-2.0"
] |
permissive
|
aivclab/vision
|
dda3b30648b01c2639d64a016b8dbcfccb87b27f
|
06839b08d8e8f274c02a6bcd31bf1b32d3dc04e4
|
refs/heads/master
| 2023-08-21T22:35:10.114394
| 2022-11-02T10:14:08
| 2022-11-02T10:14:08
| 172,566,233
| 1
| 3
|
Apache-2.0
| 2023-08-16T05:11:30
| 2019-02-25T19:00:57
|
Python
|
UTF-8
|
Python
| false
| false
| 265
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 05/03/2020
"""
from .conversion import *
from .ssd_priors import *
from .ssd_transforms import *
from .tensor_metrics import *
|
[
"christian.heider@alexandra.dk"
] |
christian.heider@alexandra.dk
|
a9045e150a1a97f2d4d88e005f75da3043df176e
|
f2e97b979c648b3b121ff0f8c37cf6ae998fa513
|
/python/compare_thread_asyncio.py
|
8345af6a5fd57ddce861f2e20365a413f2ddb79c
|
[] |
no_license
|
LeonKennedy/LearningByLanguage
|
64564271a323809ab27bd09b2d142a485d013ce2
|
51d44f54b13c2e0202f9986f3556ad7f93e55e7c
|
refs/heads/master
| 2022-11-10T22:46:07.917794
| 2022-11-08T10:48:52
| 2022-11-08T10:48:52
| 52,009,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,143
|
py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
# @Filename: spinner_thread.py
# @Author: olenji - lionhe0119@hotmail.com
# @Description: comparison of multithreading and asyncio
# @Create: 2018-12-10 10:29:31
# @Last Modified: 2018-12-10 10:29:31
import threading, asyncio
import itertools, time, sys, pdb
# -------------- threading --------------
class Signal:
go = True
def spin(msg, signal):
write, flush = sys.stdout.write, sys.stdout.flush
for char in itertools.cycle('|/-\\'):
status = char + ' ' + msg
write(status)
flush()
write('\x08' * len(status))
time.sleep(.1)
if not signal.go:
break
write(' ' * len(status) + '\x08' * len(status))
def slow_function():
time.sleep(3)
return 32
def supervisor():
signal = Signal()
spinner = threading.Thread(target=spin,
args=('thinking!olenji', signal))
print('spinner object:', spinner)
spinner.start()
result = slow_function()
signal.go = False
spinner.join()
return result
def main():
result = supervisor()
print('Answer:', result)
# ------------- asyncio --------------
@asyncio.coroutine
def spin_async(msg):
write, flush = sys.stdout.write, sys.stdout.flush
for char in itertools.cycle('|/-\\'):
status = char + ' ' + msg
write(status)
flush()
write('\x08' * len(status))
try:
yield from asyncio.sleep(.1)
except asyncio.CancelledError:
break
write(' ' * len(status) + '\x08' * len(status))
@asyncio.coroutine
def slow_function_async():  # renamed so it does not shadow the threading slow_function above
yield from asyncio.sleep(3) # sleep without blocking
return 42
@asyncio.coroutine
def supervisor_async():
spinner = asyncio.async(spin_async('thinking!'))
print('spinner object:', spinner)
    result = yield from slow_function_async()
    spinner.cancel()  # the Task object can be cancelled
return result
def main_async():
loop = asyncio.get_event_loop()
result = loop.run_until_complete(supervisor_async())
loop.close()
print('Answer:', result)
if __name__ == '__main__':
main_async()
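# A modern-asyncio sketch of the same spinner (Python 3.7+), since @asyncio.coroutine and
# asyncio.async() are deprecated; the *_modern names are illustrative only and reuse the
# asyncio/itertools/sys imports already at the top of this file.
async def spin_modern(msg):
    write, flush = sys.stdout.write, sys.stdout.flush
    for char in itertools.cycle('|/-\\'):
        status = char + ' ' + msg
        write(status)
        flush()
        write('\x08' * len(status))
        try:
            await asyncio.sleep(.1)
        except asyncio.CancelledError:
            break
    write(' ' * len(status) + '\x08' * len(status))

async def supervisor_modern():
    spinner = asyncio.create_task(spin_modern('thinking!'))
    result = await asyncio.sleep(3, result=42)   # sleep without blocking, then hand back 42
    spinner.cancel()
    return result

# Usage: print('Answer:', asyncio.run(supervisor_modern()))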
|
[
"lionhe0119@hotmail.com"
] |
lionhe0119@hotmail.com
|
370bba158aa6b9ed78e5e53ff1ec9aece224f346
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03612/s889391086.py
|
70896f9cae948f4c33f1f7fbb4659992722e6cbd
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
n = int(input())
*p, = map(int, input().split())
q = [True if p[i] != i+1 else False for i in range(n)] + [True]
ans = 0
for i in range(n):
if not q[i]:
ans += 1
q[i] = q[i+1] = True
print(ans)
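# Worked trace (illustrative): p = [1, 2, 4, 3, 5]
#   q = [False, False, True, True, False] + [True]   (True where p[i] != i+1, plus a sentinel)
#   i = 0: p[0] is a fixed point -> one adjacent swap covers positions 0 and 1, ans = 1
#   i = 4: p[4] is a fixed point -> one more swap, ans = 2
# Printed answer: 2, the minimum number of adjacent swaps so that no p[i] equals i+1.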
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|